diff options
Diffstat (limited to 'meta-arm/meta-arm-bsp/recipes-kernel/linux')
122 files changed, 23710 insertions, 0 deletions
diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/README.md b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/README.md new file mode 100644 index 0000000000..ba61ca323f --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/README.md @@ -0,0 +1,4 @@ +Arm platforms BSPs +================== + +This directory contains Arm platforms definitions and configuration for Linux. diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/arm64.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/arm64.cfg new file mode 100644 index 0000000000..62c0238786 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/arm64.cfg @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: MIT +# +# ARM64 +# +CONFIG_ARM64=y +CONFIG_64BIT=y +CONFIG_ARCH_VEXPRESS=y + +# +# Bus support +# +CONFIG_ARM_AMBA=y + +# +# Bus devices +# +CONFIG_VEXPRESS_CONFIG=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/corstone1000-standard.scc b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/corstone1000-standard.scc new file mode 100644 index 0000000000..9278ce11a5 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/corstone1000-standard.scc @@ -0,0 +1,5 @@ +define KMACHINE corstone1000 +define KTYPE standard +define KARCH arm64 + +kconf hardware corstone1000/base.cfg diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/corstone1000/base.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/corstone1000/base.cfg new file mode 100644 index 0000000000..aea1d84e59 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/corstone1000/base.cfg @@ 
-0,0 +1,29 @@ +CONFIG_LOCALVERSION="-yocto-standard" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_LOG_BUF_SHIFT=12 +# CONFIG_UTS_NS is not set +# CONFIG_PID_NS is not set +# CONFIG_NET_NS is not set +# CONFIG_BLK_DEV_BSG is not set +CONFIG_ARM64=y +CONFIG_THUMB2_KERNEL=y +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_VFP=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +# CONFIG_IPV6 is not set +# CONFIG_WIRELESS is not set +CONFIG_DEVTMPFS=y +CONFIG_TMPFS=y +# CONFIG_WLAN is not set +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_PL031=y +CONFIG_MAILBOX=y +# CONFIG_CRYPTO_HW is not set diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-arm32-standard.scc b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-arm32-standard.scc new file mode 100644 index 0000000000..656902745d --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-arm32-standard.scc @@ -0,0 +1,8 @@ +define KMACHINE fvp-arm32 +define KTYPE standard +define KARCH arm + +include ktypes/standard/standard.scc + +include fvp-arm32.scc + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-arm32.scc b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-arm32.scc new file mode 100644 index 0000000000..ff7ce57295 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-arm32.scc @@ -0,0 +1,14 @@ +include features/input/input.scc +include features/net/net.scc +include cfg/timer/no_hz.scc +include cfg/virtio.scc + +kconf hardware fvp-arm32/fvp-board.cfg +kconf hardware fvp-arm32/fvp-features.cfg +kconf hardware fvp/fvp-net.cfg +kconf hardware fvp/fvp-rtc.cfg +kconf hardware fvp/fvp-serial.cfg 
+kconf hardware fvp/fvp-cfi.cfg +kconf hardware fvp/fvp-drm.cfg +kconf hardware fvp/fvp-timer.cfg +kconf hardware fvp/fvp-watchdog.cfg diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-arm32/fvp-board.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-arm32/fvp-board.cfg new file mode 100644 index 0000000000..e49a1e367c --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-arm32/fvp-board.cfg @@ -0,0 +1,12 @@ +CONFIG_ARM=y + +CONFIG_ARCH_VEXPRESS=y +CONFIG_SMP=y +CONFIG_NR_CPUS=8 +CONFIG_HOTPLUG_CPU=y + +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y + +CONFIG_CPU_IDLE=y +CONFIG_ARM_CPUIDLE=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-arm32/fvp-features.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-arm32/fvp-features.cfg new file mode 100644 index 0000000000..12e7697104 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-arm32/fvp-features.cfg @@ -0,0 +1,9 @@ +CONFIG_BINFMT_MISC=y +CONFIG_BOUNCE=y +CONFIG_HIGHMEM=y +CONFIG_HIGHPTE=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_NEON=y +CONFIG_VFP=y +CONFIG_VFPv3=y + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-baser-aemv8r64-preempt-rt.scc b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-baser-aemv8r64-preempt-rt.scc new file mode 100644 index 0000000000..e8fea0b822 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-baser-aemv8r64-preempt-rt.scc @@ -0,0 +1,6 @@ +define KMACHINE fvp-baser-aemv8r64 +define KTYPE preempt-rt +define KARCH arm64 + +include ktypes/preempt-rt/preempt-rt.scc +include fvp-baser-aemv8r64.scc diff --git 
a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-baser-aemv8r64-standard.scc b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-baser-aemv8r64-standard.scc new file mode 100644 index 0000000000..fd1fb28246 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-baser-aemv8r64-standard.scc @@ -0,0 +1,7 @@ +define KMACHINE fvp-baser-aemv8r64 +define KTYPE standard +define KARCH arm64 + +include ktypes/standard/standard.scc + +include fvp-baser-aemv8r64.scc diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-baser-aemv8r64.scc b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-baser-aemv8r64.scc new file mode 100644 index 0000000000..a8d796702c --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-baser-aemv8r64.scc @@ -0,0 +1,4 @@ +kconf hardware arm64.cfg +kconf hardware fvp-common-peripherals.cfg +include cfg/virtio.scc +include virtio-9p.scc diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-common-peripherals.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-common-peripherals.cfg new file mode 100644 index 0000000000..ecb3cc9da4 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-common-peripherals.cfg @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: MIT +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y + +CONFIG_ARM_SP805_WATCHDOG=y + +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_PL031=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-standard.scc b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-standard.scc new file mode 100644 index 0000000000..d29e0b811b --- /dev/null +++ 
b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp-standard.scc @@ -0,0 +1,11 @@ +define KMACHINE fvp +define KTYPE standard +define KARCH arm64 + +include ktypes/standard/standard.scc + +include fvp.scc + +# default policy for standard kernels +#include features/latencytop/latencytop.scc +#include features/profiling/profiling.scc diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp.scc b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp.scc new file mode 100644 index 0000000000..80b858198b --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp.scc @@ -0,0 +1,13 @@ +include features/input/input.scc +include features/net/net.scc +include cfg/timer/no_hz.scc +include cfg/virtio.scc + +kconf hardware fvp/fvp-board.cfg +kconf hardware fvp/fvp-net.cfg +kconf hardware fvp/fvp-rtc.cfg +kconf hardware fvp/fvp-serial.cfg +kconf hardware fvp/fvp-cfi.cfg +kconf hardware fvp/fvp-drm.cfg +kconf hardware fvp/fvp-timer.cfg +kconf hardware fvp/fvp-watchdog.cfg diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-board.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-board.cfg new file mode 100644 index 0000000000..2fd0264a27 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-board.cfg @@ -0,0 +1,11 @@ +CONFIG_ARM64=y +CONFIG_ARCH_VEXPRESS=y +CONFIG_SMP=y +CONFIG_NR_CPUS=8 +CONFIG_HOTPLUG_CPU=y + +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y + +CONFIG_CPU_IDLE=y +CONFIG_ARM_CPUIDLE=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-cfi.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-cfi.cfg new file mode 100644 index 0000000000..f28e0d9205 --- /dev/null +++ 
b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-cfi.cfg @@ -0,0 +1,3 @@ +# CFI Flash +CONFIG_MTD=y +CONFIG_MTD_CFI=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-drm.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-drm.cfg new file mode 100644 index 0000000000..77133a9dfe --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-drm.cfg @@ -0,0 +1,5 @@ +# DRM CLCD +CONFIG_DRM=y +CONFIG_DRM_PL111=y +CONFIG_FB=y +CONFIG_FB_ARMCLCD=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-net.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-net.cfg new file mode 100644 index 0000000000..20cc408f39 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-net.cfg @@ -0,0 +1,3 @@ +CONFIG_NET_VENDOR_SMSC=y +CONFIG_SMSC911X=y +CONFIG_SMC91X=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-rtc.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-rtc.cfg new file mode 100644 index 0000000000..5d377b396a --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-rtc.cfg @@ -0,0 +1,2 @@ +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_PL031=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-serial.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-serial.cfg new file mode 100644 index 0000000000..4457164039 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-serial.cfg @@ -0,0 +1,2 @@ +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y diff --git 
a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-timer.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-timer.cfg new file mode 100644 index 0000000000..b33c65c283 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-timer.cfg @@ -0,0 +1,4 @@ +# Dual timer module +CONFIG_COMPILE_TEST=y +CONFIG_ARM_TIMER_SP804=y +CONFIG_CLK_SP810=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-watchdog.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-watchdog.cfg new file mode 100644 index 0000000000..977f317c86 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/fvp/fvp-watchdog.cfg @@ -0,0 +1,3 @@ +# Watchdog +CONFIG_WATCHDOG=y +CONFIG_ARM_SP805_WATCHDOG=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno-standard.scc b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno-standard.scc new file mode 100644 index 0000000000..c9d2405a27 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno-standard.scc @@ -0,0 +1,11 @@ +define KMACHINE juno +define KTYPE standard +define KARCH arm64 + +include ktypes/standard/standard.scc + +include juno.scc + +# default policy for standard kernels +#include features/latencytop/latencytop.scc +#include features/profiling/profiling.scc diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno.scc b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno.scc new file mode 100644 index 0000000000..2980b39338 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno.scc @@ -0,0 +1,22 @@ +include 
features/input/input.scc +include features/net/net.scc +include cfg/timer/no_hz.scc +include cfg/usb-mass-storage.scc + +kconf hardware juno/juno-board.cfg +kconf hardware juno/juno-devfreq.cfg +kconf hardware juno/juno-dma.cfg +kconf hardware juno/juno-drm.cfg +kconf hardware juno/juno-fb.cfg +kconf hardware juno/juno-i2c.cfg +# kconf hardware juno/juno-mali-midgard.cfg +kconf hardware juno/juno-mmc.cfg +kconf hardware juno/juno-net.cfg +kconf hardware juno/juno-pci.cfg +kconf hardware juno/juno-rtc.cfg +kconf hardware juno/juno-sata.cfg +kconf hardware juno/juno-serial.cfg +kconf hardware juno/juno-sound.cfg +kconf hardware juno/juno-thermal.cfg +kconf hardware juno/juno-usb.cfg + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-board.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-board.cfg new file mode 100644 index 0000000000..654efa4520 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-board.cfg @@ -0,0 +1,41 @@ +CONFIG_ARM64=y +CONFIG_ARCH_VEXPRESS=y +CONFIG_SMP=y +CONFIG_NR_CPUS=8 +CONFIG_HOTPLUG_CPU=y + +# Keyboard over AMBA +CONFIG_SERIO=y +CONFIG_SERIO_AMBAKMI=y + +# Hardware mailbox +CONFIG_MAILBOX=y +CONFIG_ARM_MHU=y + +# SCMI support +CONFIG_ARM_SCMI_PROTOCOL=y +CONFIG_ARM_SCMI_POWER_DOMAIN=y +CONFIG_SENSORS_ARM_SCMI=y +CONFIG_COMMON_CLK_SCMI=y + +# Power Interface and system control +CONFIG_ARM_SCPI_PROTOCOL=y +CONFIG_ARM_SCPI_POWER_DOMAIN=y +CONFIG_SENSORS_ARM_SCPI=y +CONFIG_COMMON_CLK_SCPI=y + +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y + +CONFIG_CPU_IDLE=y +CONFIG_ARM_CPUIDLE=y + +CONFIG_CPU_FREQ=y +CONFIG_ARM_SCPI_CPUFREQ=y + +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y + +CONFIG_CONNECTOR=y +CONFIG_ARM_TIMER_SP804=y diff --git 
a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-devfreq.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-devfreq.cfg new file mode 100644 index 0000000000..474e010528 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-devfreq.cfg @@ -0,0 +1,4 @@ +CONFIG_PM_DEVFREQ=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +CONFIG_DEVFREQ_GOV_PERFORMANCE=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-dma.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-dma.cfg new file mode 100644 index 0000000000..cbdffa3eec --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-dma.cfg @@ -0,0 +1,5 @@ +CONFIG_DMADEVICES=y +CONFIG_PL330_DMA=y +CONFIG_CMA=y +CONFIG_DMA_CMA=y +CONFIG_CMA_SIZE_MBYTES=96 diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-drm.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-drm.cfg new file mode 100644 index 0000000000..1216297943 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-drm.cfg @@ -0,0 +1,5 @@ +CONFIG_DRM=y +CONFIG_DRM_HDLCD=y +CONFIG_DRM_I2C_NXP_TDA998X=y +CONFIG_FB=y +CONFIG_FB_ARMCLCD=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-fb.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-fb.cfg new file mode 100644 index 0000000000..59499fa649 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-fb.cfg @@ -0,0 +1,4 @@ +CONFIG_FB=y +CONFIG_FB_ARMCLCD=y +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_VGA_CONSOLE is not 
set diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-i2c.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-i2c.cfg new file mode 100644 index 0000000000..97f80c4391 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-i2c.cfg @@ -0,0 +1,2 @@ +CONFIG_I2C=y +CONFIG_I2C_DESIGNWARE_PLATFORM=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-mali-midgard.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-mali-midgard.cfg new file mode 100644 index 0000000000..adf02b7fbd --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-mali-midgard.cfg @@ -0,0 +1,7 @@ +CONFIG_MALI_MIDGARD=y +CONFIG_MALI_EXPERT=y +CONFIG_MALI_PLATFORM_FAKE=y +CONFIG_MALI_PLATFORM_THIRDPARTY=y +CONFIG_MALI_PLATFORM_THIRDPARTY_NAME="juno_soc" +CONFIG_MALI_PLATFORM_DEVICETREE=y +CONFIG_MALI_DEVFREQ=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-mmc.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-mmc.cfg new file mode 100644 index 0000000000..41af527c8a --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-mmc.cfg @@ -0,0 +1,2 @@ +CONFIG_MMC=y +CONFIG_MMC_ARMMMCI=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-net.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-net.cfg new file mode 100644 index 0000000000..54e3686d37 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-net.cfg @@ -0,0 +1,2 @@ +CONFIG_SMSC911X=y +CONFIG_SMC91X=y diff --git 
a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-pci.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-pci.cfg new file mode 100644 index 0000000000..295d190d19 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-pci.cfg @@ -0,0 +1,11 @@ +CONFIG_PCI=y +CONFIG_PCI_MSI=y +CONFIG_PCI_REALLOC_ENABLE_AUTO=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_PCI_HOST_GENERIC=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIE_ECRC=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-rtc.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-rtc.cfg new file mode 100644 index 0000000000..5d377b396a --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-rtc.cfg @@ -0,0 +1,2 @@ +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_PL031=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-sata.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-sata.cfg new file mode 100644 index 0000000000..a159af8f98 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-sata.cfg @@ -0,0 +1,3 @@ +CONFIG_ATA=y +CONFIG_SATA_SIL24=y +CONFIG_SKY2=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-serial.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-serial.cfg new file mode 100644 index 0000000000..4457164039 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-serial.cfg @@ -0,0 +1,2 @@ +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y diff 
--git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-sound.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-sound.cfg new file mode 100644 index 0000000000..d3419efdb5 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-sound.cfg @@ -0,0 +1,14 @@ +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_SEQUENCER=y +CONFIG_SND_SEQ_DUMMY=y +CONFIG_SND_OSSEMUL=y +CONFIG_SND_MIXER_OSS=y +CONFIG_SND_PCM_OSS=y +CONFIG_SND_SEQUENCER_OSS=y +# CONFIG_SND_USB is not set +CONFIG_SND_SOC=y +CONFIG_SND_DESIGNWARE_I2S=y +CONFIG_SND_SOC_HDMI_CODEC=y +CONFIG_SND_SOC_SPDIF=y +CONFIG_SND_SIMPLE_CARD=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-thermal.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-thermal.cfg new file mode 100644 index 0000000000..6241374a64 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-thermal.cfg @@ -0,0 +1,5 @@ +CONFIG_THERMAL=y +CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_CPU_THERMAL=y +CONFIG_THERMAL_WRITABLE_TRIPS=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-usb.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-usb.cfg new file mode 100644 index 0000000000..9159de157e --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/juno/juno-usb.cfg @@ -0,0 +1,7 @@ +CONFIG_USB_STORAGE=y +CONFIG_USB=y +CONFIG_USB_ULPI=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_USB_OHCI_HCD=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/n1sdp-preempt-rt.scc 
b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/n1sdp-preempt-rt.scc new file mode 100644 index 0000000000..dc844458bb --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/n1sdp-preempt-rt.scc @@ -0,0 +1,6 @@ +define KMACHINE n1sdp +define KTYPE preempt-rt +define KARCH arm64 + +include ktypes/preempt-rt/preempt-rt.scc +include n1sdp/disable-kvm.cfg diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/n1sdp-standard.scc b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/n1sdp-standard.scc new file mode 100644 index 0000000000..8536c818c9 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/n1sdp-standard.scc @@ -0,0 +1,5 @@ +define KMACHINE n1sdp +define KTYPE standard +define KARCH arm64 + +include ktypes/standard/standard.scc diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/n1sdp/disable-kvm.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/n1sdp/disable-kvm.cfg new file mode 100644 index 0000000000..617d3e51b1 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/n1sdp/disable-kvm.cfg @@ -0,0 +1 @@ +# CONFIG_KVM is not set diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc-autofdo.scc b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc-autofdo.scc new file mode 100644 index 0000000000..5b20b6c085 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc-autofdo.scc @@ -0,0 +1 @@ +kconf non-hardware tc/autofdo.cfg diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc.scc b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc.scc new file 
mode 100644 index 0000000000..43d2153d69 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc.scc @@ -0,0 +1,9 @@ +kconf hardware tc/base.cfg +kconf non-hardware tc/dhcp.cfg +kconf non-hardware tc/devtmpfs.cfg +kconf non-hardware tc/gralloc.cfg +kconf non-hardware tc/mali.cfg +kconf non-hardware tc/tee.cfg +kconf non-hardware tc/virtio.cfg +kconf non-hardware tc/ci700.cfg +kconf non-hardware tc/trusty.cfg diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/autofdo.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/autofdo.cfg new file mode 100644 index 0000000000..8530c88409 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/autofdo.cfg @@ -0,0 +1,3 @@ +CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_SOURCE_ETM4X=y +CONFIG_CORESIGHT_TRBE=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/base.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/base.cfg new file mode 100644 index 0000000000..90b08f128c --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/base.cfg @@ -0,0 +1,12 @@ +CONFIG_ARCH_VEXPRESS=y +CONFIG_ARM_MHU=y +CONFIG_ARM_MHU_V2=y +CONFIG_ARM_SMMU_V3=y +CONFIG_ARM64_VA_BITS_48=y +CONFIG_COMMON_CLK_SCMI=y +CONFIG_DRM_HDLCD=y +CONFIG_DRM_KOMEDA=y +CONFIG_DRM_VIRT_ENCODER=y +CONFIG_MMC_ARMMMCI=y +CONFIG_SERIO_AMBAKMI=y +CONFIG_SMC91X=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/ci700.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/ci700.cfg new file mode 100644 index 0000000000..50c015319f --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/ci700.cfg @@ -0,0 +1 @@ +CONFIG_ARM_CMN=y diff --git 
a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/devtmpfs.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/devtmpfs.cfg new file mode 100644 index 0000000000..abde41232b --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/devtmpfs.cfg @@ -0,0 +1,3 @@ +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_VT=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/dhcp.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/dhcp.cfg new file mode 100644 index 0000000000..78c5a040dd --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/dhcp.cfg @@ -0,0 +1,2 @@ +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/gralloc.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/gralloc.cfg new file mode 100644 index 0000000000..22abcb5456 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/gralloc.cfg @@ -0,0 +1,2 @@ +CONFIG_DMABUF_HEAPS_SYSTEM=y +CONFIG_DMABUF_HEAPS_CMA=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/mali.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/mali.cfg new file mode 100644 index 0000000000..166818f97c --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/mali.cfg @@ -0,0 +1 @@ +CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/tee.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/tee.cfg new file mode 100644 index 0000000000..4b9cbd8ce3 --- /dev/null +++ 
b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/tee.cfg @@ -0,0 +1,3 @@ +CONFIG_TEE=y +CONFIG_OPTEE=y +CONFIG_ARM_FFA_TRANSPORT=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/trusty.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/trusty.cfg new file mode 100644 index 0000000000..54e8657f26 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/trusty.cfg @@ -0,0 +1 @@ +CONFIG_TRUSTY=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/virtio.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/virtio.cfg new file mode 100644 index 0000000000..9e6d21c4e8 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/tc/virtio.cfg @@ -0,0 +1,2 @@ +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_MMIO=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/virtio-9p.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/virtio-9p.cfg new file mode 100644 index 0000000000..c9fefa14b6 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/virtio-9p.cfg @@ -0,0 +1,4 @@ +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +CONFIG_9P_FS=y +CONFIG_9P_FS_POSIX_ACL=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/virtio-9p.scc b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/virtio-9p.scc new file mode 100644 index 0000000000..33c0465c66 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/arm-platforms-kmeta/bsp/arm-platforms/virtio-9p.scc @@ -0,0 +1 @@ +kconf non-hardware virtio-9p.cfg diff --git 
a/meta-arm/meta-arm-bsp/recipes-kernel/linux/files/corstone1000/0001-UPSTREAM-firmware-arm_ffa-Handle-compatibility-with-.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/files/corstone1000/0001-UPSTREAM-firmware-arm_ffa-Handle-compatibility-with-.patch new file mode 100644 index 0000000000..a4fd673084 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/files/corstone1000/0001-UPSTREAM-firmware-arm_ffa-Handle-compatibility-with-.patch @@ -0,0 +1,90 @@ +Upstream-Status: Backport [https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=8e3f9da608f14cfebac2659d8dd8737b79d01308] +Signed-off-by: Abdellatif El Khlifi <abdellatif.elkhlifi@arm.com> + +From a8f3e351c07c48774be2a45e184b9f08dc92f1db Mon Sep 17 00:00:00 2001 +From: Sudeep Holla <sudeep.holla@arm.com> +Date: Wed, 13 Apr 2022 15:43:19 +0100 +Subject: [PATCH] UPSTREAM: firmware: arm_ffa: Handle compatibility with + different firmware versions + +The driver currently just support v1.0 of Arm FFA specification. It also +expects the firmware implementation to match the same and bail out if it +doesn't match. This is causing issue when running with higher version of +firmware implementation(e.g. v1.1 which will released soon). + +In order to support compatibility with different firmware versions, let +us add additional checks and find the compatible version the driver can +work with. 
+ +Link: https://lore.kernel.org/r/20211013091127.990992-1-sudeep.holla@arm.com +Reviewed-by: Jens Wiklander <jens.wiklander@linaro.org> +Signed-off-by: Sudeep Holla <sudeep.holla@arm.com> +(cherry picked from commit 8e3f9da608f14cfebac2659d8dd8737b79d01308) +Change-Id: I7bc9a3b172a9067bfd4e9bb9d50b4729e915b5a5 +Bug: 168585974 +--- + drivers/firmware/arm_ffa/driver.c | 37 ++++++++++++++++++++++++++----- + 1 file changed, 32 insertions(+), 5 deletions(-) + +diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c +index c9fb56afbcb4..6e0c883ab708 100644 +--- a/drivers/firmware/arm_ffa/driver.c ++++ b/drivers/firmware/arm_ffa/driver.c +@@ -167,6 +167,28 @@ struct ffa_drv_info { + + static struct ffa_drv_info *drv_info; + ++/* ++ * The driver must be able to support all the versions from the earliest ++ * supported FFA_MIN_VERSION to the latest supported FFA_DRIVER_VERSION. ++ * The specification states that if firmware supports a FFA implementation ++ * that is incompatible with and at a greater version number than specified ++ * by the caller(FFA_DRIVER_VERSION passed as parameter to FFA_VERSION), ++ * it must return the NOT_SUPPORTED error code. 
++ */ ++static u32 ffa_compatible_version_find(u32 version) ++{ ++ u32 compat_version; ++ u16 major = MAJOR_VERSION(version), minor = MINOR_VERSION(version); ++ u16 drv_major = MAJOR_VERSION(FFA_DRIVER_VERSION); ++ u16 drv_minor = MINOR_VERSION(FFA_DRIVER_VERSION); ++ ++ if ((major < drv_major) || (major == drv_major && minor <= drv_minor)) ++ return version; ++ ++ pr_info("Firmware version higher than driver version, downgrading\n"); ++ return FFA_DRIVER_VERSION; ++} ++ + static int ffa_version_check(u32 *version) + { + ffa_value_t ver; +@@ -180,15 +202,20 @@ static int ffa_version_check(u32 *version) + return -EOPNOTSUPP; + } + +- if (ver.a0 < FFA_MIN_VERSION || ver.a0 > FFA_DRIVER_VERSION) { +- pr_err("Incompatible version %d.%d found\n", +- MAJOR_VERSION(ver.a0), MINOR_VERSION(ver.a0)); ++ if (ver.a0 < FFA_MIN_VERSION) { ++ pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n", ++ MAJOR_VERSION(ver.a0), MINOR_VERSION(ver.a0), ++ MAJOR_VERSION(FFA_MIN_VERSION), ++ MINOR_VERSION(FFA_MIN_VERSION)); + return -EINVAL; + } + +- *version = ver.a0; +- pr_info("Version %d.%d found\n", MAJOR_VERSION(ver.a0), ++ pr_info("Driver version %d.%d\n", MAJOR_VERSION(FFA_DRIVER_VERSION), ++ MINOR_VERSION(FFA_DRIVER_VERSION)); ++ pr_info("Firmware version %d.%d found\n", MAJOR_VERSION(ver.a0), + MINOR_VERSION(ver.a0)); ++ *version = ffa_compatible_version_find(ver.a0); ++ + return 0; + } + +-- +2.17.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/files/corstone1000/corstone1000_kernel_debug.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/files/corstone1000/corstone1000_kernel_debug.cfg new file mode 100644 index 0000000000..aad9e93a64 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/files/corstone1000/corstone1000_kernel_debug.cfg @@ -0,0 +1,3 @@ +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_INFO_DWARF4=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/files/corstone1000/defconfig 
b/meta-arm/meta-arm-bsp/recipes-kernel/linux/files/corstone1000/defconfig new file mode 100644 index 0000000000..5f0a7e9198 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/files/corstone1000/defconfig @@ -0,0 +1,94 @@ +CONFIG_LOCALVERSION="-yocto-standard" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y +CONFIG_LOG_BUF_SHIFT=13 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=13 +CONFIG_RELAY=y +CONFIG_BOOT_CONFIG=y +CONFIG_ARCH_VEXPRESS=y +CONFIG_CMDLINE="console=ttyAMA0 loglevel=9" +CONFIG_EFI=y +# CONFIG_SUSPEND is not set +CONFIG_ARM_FFA_TRANSPORT=y +CONFIG_EFI_BOOTLOADER_CONTROL=y +CONFIG_EFI_CAPSULE_LOADER=y +CONFIG_EFI_TEST=y +CONFIG_RESET_ATTACK_MITIGATION=y +# CONFIG_STACKPROTECTOR is not set +CONFIG_MODULES=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_SCHED=y +CONFIG_DEVTMPFS=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set +# CONFIG_NET_VENDOR_CORTINA is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_GOOGLE is not set +# CONFIG_NET_VENDOR_HISILICON is not set +# CONFIG_NET_VENDOR_HUAWEI is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_PENSANDO is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set 
+# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +CONFIG_SMC91X=y +CONFIG_SMSC911X=y +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_XILINX is not set +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_USB=y +CONFIG_USB_STORAGE=y +CONFIG_USB_UAS=y +CONFIG_USB_ISP1760=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_PL031=y +CONFIG_TEE=y +CONFIG_OPTEE=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_CONFIGFS_FS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_860=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_15=y +CONFIG_NLS_UTF8=y +CONFIG_LIBCRC32C=y +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +CONFIG_DEBUG_FS=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_STACKTRACE=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/files/fvp-base-arm32/0001-ARM-vexpress-enable-GICv3.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/files/fvp-base-arm32/0001-ARM-vexpress-enable-GICv3.patch new file mode 100644 index 0000000000..d0a05c2462 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/files/fvp-base-arm32/0001-ARM-vexpress-enable-GICv3.patch @@ -0,0 +1,31 @@ +From 5dbb6c4267b1e46ed08359be363d8bc9b6a79397 Mon Sep 17 00:00:00 2001 +From: Ryan Harkin <ryan.harkin@linaro.org> +Date: Wed, 16 Nov 2016 14:43:02 +0000 +Subject: [PATCH] ARM: vexpress: enable GICv3 + +Upstream-Status: Pending + +ARMv8 targets such as ARM's FVP Cortex-A32 model can run the 32-bit +ARMv7 kernel. And these targets often contain GICv3. 
+ +Signed-off-by: Ryan Harkin <ryan.harkin@linaro.org> +Signed-off-by: Jon Medhurst <tixy@linaro.org> +--- + arch/arm/mach-vexpress/Kconfig | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig +index 7c728ebc0b33..ed579382d41f 100644 +--- a/arch/arm/mach-vexpress/Kconfig ++++ b/arch/arm/mach-vexpress/Kconfig +@@ -4,6 +4,7 @@ menuconfig ARCH_VEXPRESS + select ARCH_SUPPORTS_BIG_ENDIAN + select ARM_AMBA + select ARM_GIC ++ select ARM_GIC_V3 + select ARM_GLOBAL_TIMER + select ARM_TIMER_SP804 + select COMMON_CLK_VERSATILE +-- +2.17.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/files/fvp-baser-aemv8r64/fvp-baser-aemv8r64.dts b/meta-arm/meta-arm-bsp/recipes-kernel/linux/files/fvp-baser-aemv8r64/fvp-baser-aemv8r64.dts new file mode 100644 index 0000000000..6911a598f7 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/files/fvp-baser-aemv8r64/fvp-baser-aemv8r64.dts @@ -0,0 +1,212 @@ +/dts-v1/; + +/ { + + #address-cells = <0x2>; + #size-cells = <0x2>; + interrupt-parent = <0x1>; + model = "Generated"; + compatible = "arm,base"; + + memory@0 { + #address-cells = <0x2>; + #size-cells = <0x2>; + device_type = "memory"; + reg = <0x0 0x0 0x0 0x80000000>, + <0x00000008 0x80000000 0x0 0x80000000>; + }; + + cpus { + #address-cells = <0x2>; + #size-cells = <0x0>; + + cpu-map { + cluster0 { + core0 { thread0 { cpu = <&CPU_0>; }; }; + core1 { thread0 { cpu = <&CPU_1>; }; }; + core2 { thread0 { cpu = <&CPU_2>; }; }; + core3 { thread0 { cpu = <&CPU_3>; }; }; + }; + }; + + CPU_0: cpu@0 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "spin-table"; + cpu-release-addr = <0x0 0x7f800>; + }; + + CPU_1: cpu@1 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "spin-table"; + cpu-release-addr = <0x0 0x7f808>; + }; + + CPU_2: cpu@2 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x2>; + enable-method = 
"spin-table"; + cpu-release-addr = <0x0 0x7f810>; + }; + + CPU_3: cpu@3 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x3>; + enable-method = "spin-table"; + cpu-release-addr = <0x0 0x7f818>; + }; + }; + + interrupt-controller@af000000 { + compatible = "arm,gic-v3"; + #interrupt-cells = <0x3>; + #address-cells = <0x2>; + #size-cells = <0x2>; + ranges; + interrupt-controller; + #redistributor-regions = <0x1>; + reg = <0x0 0xaf000000 0x0 0x10000>, // GICD + <0x0 0xaf100000 0x0 0x100000>, // GICR + <0x0 0xac000000 0x0 0x2000>, // GICC + <0x0 0xac010000 0x0 0x2000>, // GICH + <0x0 0xac02f000 0x0 0x2000>; // GICV + interrupts = <0x1 9 0x4>; + linux,phandle = <0x1>; + phandle = <0x1>; + + its: msi-controller@2f020000 { + #msi-cells = <1>; + compatible = "arm,gic-v3-its"; + reg = <0x0 0xaf020000 0x0 0x20000>; // GITS + msi-controller; + }; + + }; + + refclk100mhz: refclk100mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <100000000>; + clock-output-names = "apb_pclk"; + }; + + refclk24mhz: refclk24mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <24000000>; + clock-output-names = "refclk24mhz"; + }; + + refclk1hz: refclk1hz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <1>; + clock-output-names = "refclk1hz"; + }; + + uart@9c090000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x9c090000 0x0 0x1000>; + interrupts = <0x0 5 0x4>; + clocks = <&refclk24mhz>, <&refclk100mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + uart@9c0a0000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x9c0a0000 0x0 0x1000>; + interrupts = <0x0 6 0x4>; + clocks = <&refclk24mhz>, <&refclk100mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + uart@9c0b0000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x9c0b0000 0x0 0x1000>; + interrupts = <0x0 7 0x4>; + clocks = <&refclk24mhz>, <&refclk100mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + 
uart@9c0c0000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0x9c0c0000 0x0 0x1000>; + interrupts = <0x0 8 0x4>; + clocks = <&refclk24mhz>, <&refclk100mhz>; + clock-names = "uartclk", "apb_pclk"; + }; + + wdt@9c0f0000 { + compatible = "arm,sp805", "arm,primecell"; + reg = <0x0 0x9c0f0000 0x0 0x1000>; + interrupts = <0x0 0 0x4>; + clocks = <&refclk24mhz>, <&refclk100mhz>; + clock-names = "wdog_clk", "apb_pclk"; + }; + + rtc@9c170000 { + compatible = "arm,pl031", "arm,primecell"; + reg = <0x0 0x9c170000 0x0 0x1000>; + interrupts = <0x0 4 0x4>; + clocks = <&refclk1hz>; + clock-names = "apb_pclk"; + }; + + virtio-block@9c130000 { + compatible = "virtio,mmio"; + reg = <0 0x9c130000 0 0x200>; + interrupts = <0x0 42 0x4>; + }; + + virtio-p9@9c140000{ + compatible = "virtio,mmio"; + reg = <0x0 0x9c140000 0x0 0x1000>; + interrupts = <0x0 43 0x4>; + }; + + virtio-net@9c150000 { + compatible = "virtio,mmio"; + reg = <0 0x9c150000 0 0x200>; + interrupts = <0x0 44 0x4>; + }; + + virtio-rng@9c200000 { + compatible = "virtio,mmio"; + reg = <0 0x9c200000 0 0x200>; + interrupts = <0x0 46 0x4>; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = <0x1 13 0xff08>, + <0x1 14 0xff08>, + <0x1 11 0xff08>, + <0x1 4 0xff08>; + clock-frequency = <100000000>; + }; + + aliases { + serial0 = "/uart@9c090000"; + serial1 = "/uart@9c0a0000"; + serial2 = "/uart@9c0b0000"; + serial3 = "/uart@9c0c0000"; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = <0 60 4>, + <0 61 4>, + <0 62 4>, + <0 63 4>; + }; + + chosen { + bootargs = "earlycon console=ttyAMA0 loglevel=8 rootfstype=ext4 root=/dev/vda1 rw"; + stdout-path = "serial0"; + }; +}; diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/files/juno/juno-dts-mhu-doorbell.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/files/juno/juno-dts-mhu-doorbell.patch new file mode 100644 index 0000000000..81f641c478 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/files/juno/juno-dts-mhu-doorbell.patch 
@@ -0,0 +1,616 @@ +Add MHU doorbell support and SCMI device nodes to the Juno DeviceTree. + +Patch taken from https://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux.git/log/?h=scmi_dt_defconfig + +Upstream-Status: Pending +Signed-off-by: Ross Burton <ross.burton@arm.com> + +From 821ffd8e5dc4d2fb2716d5fb912b343b932e1e77 Mon Sep 17 00:00:00 2001 +From: Sudeep Holla <sudeep.holla@arm.com> +Date: Thu, 20 Apr 2017 11:58:01 +0100 +Subject: [PATCH] arm64: dts: juno: add mhu doorbell support and scmi device + nodes + +Signed-off-by: Sudeep Holla <sudeep.holla@arm.com> +--- + arch/arm64/boot/dts/arm/juno-base.dtsi | 139 ++++++++++++---------- + arch/arm64/boot/dts/arm/juno-cs-r1r2.dtsi | 6 +- + arch/arm64/boot/dts/arm/juno-r1.dts | 12 +- + arch/arm64/boot/dts/arm/juno-r2.dts | 12 +- + arch/arm64/boot/dts/arm/juno.dts | 12 +- + 5 files changed, 96 insertions(+), 85 deletions(-) + +diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi +index 6288e104a089..36844f7d861e 100644 +--- a/arch/arm64/boot/dts/arm/juno-base.dtsi ++++ b/arch/arm64/boot/dts/arm/juno-base.dtsi +@@ -23,11 +23,12 @@ frame@2a830000 { + }; + + mailbox: mhu@2b1f0000 { +- compatible = "arm,mhu", "arm,primecell"; ++ compatible = "arm,mhu-doorbell", "arm,primecell"; + reg = <0x0 0x2b1f0000 0x0 0x1000>; + interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>; +- #mbox-cells = <1>; ++ #mbox-cells = <2>; ++ mbox-name = "ARM-MHU"; + clocks = <&soc_refclk100mhz>; + clock-names = "apb_pclk"; + }; +@@ -39,7 +40,7 @@ smmu_gpu: iommu@2b400000 { + <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>; + #iommu-cells = <1>; + #global-interrupts = <1>; +- power-domains = <&scpi_devpd 1>; ++ power-domains = <&scmi_devpd 9>; + dma-coherent; + status = "disabled"; + }; +@@ -63,7 +64,7 @@ smmu_etr: iommu@2b600000 { + #iommu-cells = <1>; + #global-interrupts = <1>; + dma-coherent; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + }; + + gic: 
interrupt-controller@2c010000 { +@@ -123,7 +124,7 @@ etf@20010000 { /* etf0 */ + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + + in-ports { + port { +@@ -147,7 +148,7 @@ tpiu@20030000 { + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + in-ports { + port { + tpiu_in_port: endpoint { +@@ -164,7 +165,7 @@ main_funnel: funnel@20040000 { + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + + out-ports { + port { +@@ -201,7 +202,7 @@ etr@20070000 { + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + arm,scatter-gather; + in-ports { + port { +@@ -220,7 +221,7 @@ stm@20100000 { + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + out-ports { + port { + stm_out_port: endpoint { +@@ -235,7 +236,7 @@ replicator@20120000 { + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + + out-ports { + #address-cells = <1>; +@@ -270,7 +271,7 @@ cpu_debug0: cpu-debug@22010000 { + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + }; + + etm0: etm@22040000 { +@@ -279,7 +280,7 @@ etm0: etm@22040000 { + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + out-ports { + port { + cluster0_etm0_out_port: endpoint { +@@ -295,7 +296,7 @@ funnel@220c0000 { /* cluster0 funnel */ + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + out-ports { + port { + cluster0_funnel_out_port: endpoint { +@@ 
-330,7 +331,7 @@ cpu_debug1: cpu-debug@22110000 { + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + }; + + etm1: etm@22140000 { +@@ -339,7 +340,7 @@ etm1: etm@22140000 { + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + out-ports { + port { + cluster0_etm1_out_port: endpoint { +@@ -355,7 +356,7 @@ cpu_debug2: cpu-debug@23010000 { + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + }; + + etm2: etm@23040000 { +@@ -364,7 +365,7 @@ etm2: etm@23040000 { + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + out-ports { + port { + cluster1_etm0_out_port: endpoint { +@@ -380,7 +381,7 @@ funnel@230c0000 { /* cluster1 funnel */ + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + out-ports { + port { + cluster1_funnel_out_port: endpoint { +@@ -427,7 +428,7 @@ cpu_debug3: cpu-debug@23110000 { + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + }; + + etm3: etm@23140000 { +@@ -436,7 +437,7 @@ etm3: etm@23140000 { + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + out-ports { + port { + cluster1_etm1_out_port: endpoint { +@@ -452,7 +453,7 @@ cpu_debug4: cpu-debug@23210000 { + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + }; + + etm4: etm@23240000 { +@@ -461,7 +462,7 @@ etm4: etm@23240000 { + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + out-ports { + port { + 
cluster1_etm2_out_port: endpoint { +@@ -477,7 +478,7 @@ cpu_debug5: cpu-debug@23310000 { + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + }; + + etm5: etm@23340000 { +@@ -486,7 +487,7 @@ etm5: etm@23340000 { + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + out-ports { + port { + cluster1_etm3_out_port: endpoint { +@@ -503,8 +504,8 @@ gpu: gpu@2d000000 { + <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "job", "mmu", "gpu"; +- clocks = <&scpi_dvfs 2>; +- power-domains = <&scpi_devpd 1>; ++ clocks = <&scmi_dvfs 2>; ++ power-domains = <&scmi_devpd 9>; + dma-coherent; + /* The SMMU is only really of interest to bare-metal hypervisors */ + /* iommus = <&smmu_gpu 0>; */ +@@ -519,14 +520,24 @@ sram: sram@2e000000 { + #size-cells = <1>; + ranges = <0 0x0 0x2e000000 0x8000>; + +- cpu_scp_lpri: scp-sram@0 { +- compatible = "arm,juno-scp-shmem"; +- reg = <0x0 0x200>; ++ cpu_scp_lpri0: scp-sram@0 { ++ compatible = "arm,scmi-shmem"; ++ reg = <0x0 0x80>; + }; + +- cpu_scp_hpri: scp-sram@200 { +- compatible = "arm,juno-scp-shmem"; +- reg = <0x200 0x200>; ++ cpu_scp_lpri1: scp-sram@80 { ++ compatible = "arm,scmi-shmem"; ++ reg = <0x80 0x80>; ++ }; ++ ++ cpu_scp_hpri0: scp-sram@100 { ++ compatible = "arm,scmi-shmem"; ++ reg = <0x100 0x80>; ++ }; ++ ++ cpu_scp_hpri1: scp-sram@180 { ++ compatible = "arm,scmi-shmem"; ++ reg = <0x180 0x80>; + }; + }; + +@@ -558,37 +569,37 @@ pcie_ctlr: pcie@40000000 { + iommu-map = <0x0 &smmu_pcie 0x0 0x1>; + }; + +- scpi { +- compatible = "arm,scpi"; +- mboxes = <&mailbox 1>; +- shmem = <&cpu_scp_hpri>; ++ firmware { ++ scmi { ++ compatible = "arm,scmi"; ++ mbox-names = "tx", "rx"; ++ mboxes = <&mailbox 0 0 &mailbox 0 1>; ++ shmem = <&cpu_scp_lpri0 &cpu_scp_lpri1>; ++ #address-cells = <1>; ++ #size-cells = <0>; + +- clocks { +- compatible = 
"arm,scpi-clocks"; ++ scmi_devpd: protocol@11 { ++ reg = <0x11>; ++ #power-domain-cells = <1>; ++ }; + +- scpi_dvfs: clocks-0 { +- compatible = "arm,scpi-dvfs-clocks"; ++ scmi_dvfs: protocol@13 { ++ reg = <0x13>; + #clock-cells = <1>; +- clock-indices = <0>, <1>, <2>; +- clock-output-names = "atlclk", "aplclk","gpuclk"; ++ mbox-names = "tx", "rx"; ++ mboxes = <&mailbox 1 0 &mailbox 1 1>; ++ shmem = <&cpu_scp_hpri0 &cpu_scp_hpri1>; + }; +- scpi_clk: clocks-1 { +- compatible = "arm,scpi-variable-clocks"; ++ ++ scmi_clk: protocol@14 { ++ reg = <0x14>; + #clock-cells = <1>; +- clock-indices = <3>; +- clock-output-names = "pxlclk"; + }; +- }; + +- scpi_devpd: power-controller { +- compatible = "arm,scpi-power-domains"; +- num-domains = <2>; +- #power-domain-cells = <1>; +- }; +- +- scpi_sensors0: sensors { +- compatible = "arm,scpi-sensors"; +- #thermal-sensor-cells = <1>; ++ scmi_sensors0: protocol@15 { ++ reg = <0x15>; ++ #thermal-sensor-cells = <1>; ++ }; + }; + }; + +@@ -596,40 +607,40 @@ thermal-zones { + pmic { + polling-delay = <1000>; + polling-delay-passive = <100>; +- thermal-sensors = <&scpi_sensors0 0>; ++ thermal-sensors = <&scmi_sensors0 0>; + }; + + soc { + polling-delay = <1000>; + polling-delay-passive = <100>; +- thermal-sensors = <&scpi_sensors0 3>; ++ thermal-sensors = <&scmi_sensors0 3>; + }; + + big_cluster_thermal_zone: big-cluster { + polling-delay = <1000>; + polling-delay-passive = <100>; +- thermal-sensors = <&scpi_sensors0 21>; ++ thermal-sensors = <&scmi_sensors0 21>; + status = "disabled"; + }; + + little_cluster_thermal_zone: little-cluster { + polling-delay = <1000>; + polling-delay-passive = <100>; +- thermal-sensors = <&scpi_sensors0 22>; ++ thermal-sensors = <&scmi_sensors0 22>; + status = "disabled"; + }; + + gpu0_thermal_zone: gpu0 { + polling-delay = <1000>; + polling-delay-passive = <100>; +- thermal-sensors = <&scpi_sensors0 23>; ++ thermal-sensors = <&scmi_sensors0 23>; + status = "disabled"; + }; + + gpu1_thermal_zone: gpu1 { + 
polling-delay = <1000>; + polling-delay-passive = <100>; +- thermal-sensors = <&scpi_sensors0 24>; ++ thermal-sensors = <&scmi_sensors0 24>; + status = "disabled"; + }; + }; +@@ -705,7 +716,7 @@ hdlcd@7ff50000 { + reg = <0 0x7ff50000 0 0x1000>; + interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>; + iommus = <&smmu_hdlcd1 0>; +- clocks = <&scpi_clk 3>; ++ clocks = <&scmi_clk 3>; + clock-names = "pxlclk"; + + port { +@@ -720,7 +731,7 @@ hdlcd@7ff60000 { + reg = <0 0x7ff60000 0 0x1000>; + interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>; + iommus = <&smmu_hdlcd0 0>; +- clocks = <&scpi_clk 3>; ++ clocks = <&scmi_clk 3>; + clock-names = "pxlclk"; + + port { +diff --git a/arch/arm64/boot/dts/arm/juno-cs-r1r2.dtsi b/arch/arm64/boot/dts/arm/juno-cs-r1r2.dtsi +index eda3d9e18af6..e6ecb0dfcbcd 100644 +--- a/arch/arm64/boot/dts/arm/juno-cs-r1r2.dtsi ++++ b/arch/arm64/boot/dts/arm/juno-cs-r1r2.dtsi +@@ -6,7 +6,7 @@ funnel@20130000 { /* cssys1 */ + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + out-ports { + port { + csys1_funnel_out_port: endpoint { +@@ -29,7 +29,7 @@ etf@20140000 { /* etf1 */ + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + in-ports { + port { + etf1_in_port: endpoint { +@@ -52,7 +52,7 @@ funnel@20150000 { /* cssys2 */ + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; +- power-domains = <&scpi_devpd 0>; ++ power-domains = <&scmi_devpd 8>; + out-ports { + port { + csys2_funnel_out_port: endpoint { +diff --git a/arch/arm64/boot/dts/arm/juno-r1.dts b/arch/arm64/boot/dts/arm/juno-r1.dts +index 0e24e29eb9b1..fee67943f4d5 100644 +--- a/arch/arm64/boot/dts/arm/juno-r1.dts ++++ b/arch/arm64/boot/dts/arm/juno-r1.dts +@@ -96,7 +96,7 @@ A57_0: cpu@0 { + d-cache-line-size = <64>; + d-cache-sets = <256>; + next-level-cache = <&A57_L2>; +- clocks = <&scpi_dvfs 0>; ++ clocks = <&scmi_dvfs 0>; + cpu-idle-states 
= <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + capacity-dmips-mhz = <1024>; + }; +@@ -113,7 +113,7 @@ A57_1: cpu@1 { + d-cache-line-size = <64>; + d-cache-sets = <256>; + next-level-cache = <&A57_L2>; +- clocks = <&scpi_dvfs 0>; ++ clocks = <&scmi_dvfs 0>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + capacity-dmips-mhz = <1024>; + }; +@@ -130,7 +130,7 @@ A53_0: cpu@100 { + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&A53_L2>; +- clocks = <&scpi_dvfs 1>; ++ clocks = <&scmi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + capacity-dmips-mhz = <578>; + }; +@@ -147,7 +147,7 @@ A53_1: cpu@101 { + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&A53_L2>; +- clocks = <&scpi_dvfs 1>; ++ clocks = <&scmi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + capacity-dmips-mhz = <578>; + }; +@@ -164,7 +164,7 @@ A53_2: cpu@102 { + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&A53_L2>; +- clocks = <&scpi_dvfs 1>; ++ clocks = <&scmi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + capacity-dmips-mhz = <578>; + }; +@@ -181,7 +181,7 @@ A53_3: cpu@103 { + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&A53_L2>; +- clocks = <&scpi_dvfs 1>; ++ clocks = <&scmi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + capacity-dmips-mhz = <578>; + }; +diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts +index e609420ce3e4..7792626eb29e 100644 +--- a/arch/arm64/boot/dts/arm/juno-r2.dts ++++ b/arch/arm64/boot/dts/arm/juno-r2.dts +@@ -96,7 +96,7 @@ A72_0: cpu@0 { + d-cache-line-size = <64>; + d-cache-sets = <256>; + next-level-cache = <&A72_L2>; +- clocks = <&scpi_dvfs 0>; ++ clocks = <&scmi_dvfs 0>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <450>; +@@ -114,7 +114,7 @@ A72_1: cpu@1 { + d-cache-line-size = <64>; + d-cache-sets = <256>; + 
next-level-cache = <&A72_L2>; +- clocks = <&scpi_dvfs 0>; ++ clocks = <&scmi_dvfs 0>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <450>; +@@ -132,7 +132,7 @@ A53_0: cpu@100 { + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&A53_L2>; +- clocks = <&scpi_dvfs 1>; ++ clocks = <&scmi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + capacity-dmips-mhz = <485>; + dynamic-power-coefficient = <140>; +@@ -150,7 +150,7 @@ A53_1: cpu@101 { + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&A53_L2>; +- clocks = <&scpi_dvfs 1>; ++ clocks = <&scmi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + capacity-dmips-mhz = <485>; + dynamic-power-coefficient = <140>; +@@ -168,7 +168,7 @@ A53_2: cpu@102 { + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&A53_L2>; +- clocks = <&scpi_dvfs 1>; ++ clocks = <&scmi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + capacity-dmips-mhz = <485>; + dynamic-power-coefficient = <140>; +@@ -186,7 +186,7 @@ A53_3: cpu@103 { + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&A53_L2>; +- clocks = <&scpi_dvfs 1>; ++ clocks = <&scmi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + capacity-dmips-mhz = <485>; + dynamic-power-coefficient = <140>; +diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts +index f00cffbd032c..a28316c65c1b 100644 +--- a/arch/arm64/boot/dts/arm/juno.dts ++++ b/arch/arm64/boot/dts/arm/juno.dts +@@ -95,7 +95,7 @@ A57_0: cpu@0 { + d-cache-line-size = <64>; + d-cache-sets = <256>; + next-level-cache = <&A57_L2>; +- clocks = <&scpi_dvfs 0>; ++ clocks = <&scmi_dvfs 0>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <530>; +@@ -113,7 +113,7 @@ A57_1: cpu@1 { + d-cache-line-size = <64>; + d-cache-sets = <256>; + 
next-level-cache = <&A57_L2>; +- clocks = <&scpi_dvfs 0>; ++ clocks = <&scmi_dvfs 0>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <530>; +@@ -131,7 +131,7 @@ A53_0: cpu@100 { + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&A53_L2>; +- clocks = <&scpi_dvfs 1>; ++ clocks = <&scmi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + capacity-dmips-mhz = <578>; + dynamic-power-coefficient = <140>; +@@ -149,7 +149,7 @@ A53_1: cpu@101 { + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&A53_L2>; +- clocks = <&scpi_dvfs 1>; ++ clocks = <&scmi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + capacity-dmips-mhz = <578>; + dynamic-power-coefficient = <140>; +@@ -167,7 +167,7 @@ A53_2: cpu@102 { + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&A53_L2>; +- clocks = <&scpi_dvfs 1>; ++ clocks = <&scmi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + capacity-dmips-mhz = <578>; + dynamic-power-coefficient = <140>; +@@ -185,7 +185,7 @@ A53_3: cpu@103 { + d-cache-line-size = <64>; + d-cache-sets = <128>; + next-level-cache = <&A53_L2>; +- clocks = <&scpi_dvfs 1>; ++ clocks = <&scmi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + capacity-dmips-mhz = <578>; + dynamic-power-coefficient = <140>; +-- +2.25.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm-platforms.inc b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm-platforms.inc new file mode 100644 index 0000000000..f05c5ffc46 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm-platforms.inc @@ -0,0 +1,185 @@ +# Kernel configuration and dts specific information + +# +# Kernel configurations and dts (If not using Linux provided ones) are captured +# in this file. 
Update SRC_URI and do_patch for building images with custom dts +# + +# We can't set FILESEXTRAPATHS once because of how the kernel classes search for +# config fragments. Discussion is ongoing as to whether this is the correct +# solution, or a workaround. +# https://bugzilla.yoctoproject.org/show_bug.cgi?id=14154 +ARMBSPFILESPATHS := "${THISDIR}:${THISDIR}/files:" + +# Arm platforms kmeta +SRC_URI_KMETA = "file://arm-platforms-kmeta;type=kmeta;name=arm-platforms-kmeta;destsuffix=arm-platforms-kmeta" +SRC_URI:append:fvp-base = " ${SRC_URI_KMETA}" +SRC_URI:append:fvp-base-arm32 = " ${SRC_URI_KMETA}" +SRC_URI:append:fvp-baser-aemv8r64 = " ${SRC_URI_KMETA}" +SRC_URI:append:juno = " ${SRC_URI_KMETA}" +SRC_URI:append:n1sdp = " ${SRC_URI_KMETA}" +SRC_URI:append:tc = " ${SRC_URI_KMETA}" +SRCREV:arm-platforms-kmeta = "6147e82375aa9df8f2a162d42ea6406c79c854c5" + +# +# Corstone-500 KMACHINE +# +COMPATIBLE_MACHINE:corstone500 = "corstone500" +KBUILD_DEFCONFIG:corstone500 = "multi_v7_defconfig" +KCONFIG_MODE:corstone500 = "--alldefconfig" + +# +# Corstone1000 KMACHINE +# +FILESEXTRAPATHS:prepend:corstone1000 := "${ARMBSPFILESPATHS}" +COMPATIBLE_MACHINE:corstone1000 = "${MACHINE}" +KCONFIG_MODE:corstone1000 = "--alldefconfig" +KMACHINE:corstone1000 = "corstone1000" +LINUX_KERNEL_TYPE:corstone1000 = "standard" +#disabling the rootfs cpio file compression so it is not compressed twice when bundled with the kernel +KERNEL_EXTRA_ARGS:corstone1000 += "CONFIG_INITRAMFS_COMPRESSION_NONE=y" +SRC_URI:append:corstone1000 = " \ + file://defconfig \ + file://0001-UPSTREAM-firmware-arm_ffa-Handle-compatibility-with-.patch \ + " + +SRC_URI:append:corstone1000 = " ${@bb.utils.contains('MACHINE_FEATURES', \ + 'corstone1000_kernel_debug', \ + 'file://corstone1000_kernel_debug.cfg', \ + '', \ + d)}" + +# Default kernel features not needed for corstone1000 +# otherwise the extra kernel modules will increase the rootfs size +# corstone1000 has limited flash memory constraints 
+KERNEL_EXTRA_FEATURES:corstone1000 = "" +KERNEL_FEATURES:corstone1000 = "" + +# +# FVP BASE KMACHINE +# +COMPATIBLE_MACHINE:fvp-base = "fvp-base" +KMACHINE:fvp-base = "fvp" +FILESEXTRAPATHS:prepend:fvp-base := "${ARMBSPFILESPATHS}" + +# +# FVP BASE ARM32 KMACHINE +# +COMPATIBLE_MACHINE:fvp-base-arm32 = "fvp-base-arm32" +KMACHINE:fvp-base-arm32 = "fvp-arm32" +FILESEXTRAPATHS:prepend:fvp-base-arm32 := "${ARMBSPFILESPATHS}" +SRC_URI:append:fvp-base-arm32 = " file://0001-ARM-vexpress-enable-GICv3.patch" +# We want to use the DT in the arm64 tree but the kernel build doesn't like that, so symlink it +do_compile:prepend:fvp-base-arm32() { + mkdir --parents ${S}/arch/arm/boot/dts/arm + for file in fvp-base-revc.dts rtsm_ve-motherboard.dtsi rtsm_ve-motherboard-rs2.dtsi; do + ln -fsr ${S}/arch/arm64/boot/dts/arm/$file ${S}/arch/arm/boot/dts/arm + done +} + +# +# FVP BaseR AEMv8r64 Machine +# +COMPATIBLE_MACHINE:fvp-baser-aemv8r64 = "fvp-baser-aemv8r64" +FILESEXTRAPATHS:prepend:fvp-baser-aemv8r64 := "${ARMBSPFILESPATHS}" +SRC_URI:append:fvp-baser-aemv8r64 = " file://fvp-baser-aemv8r64.dts;subdir=git/arch/arm64/boot/dts/arm" + +# +# Juno KMACHINE +# +COMPATIBLE_MACHINE:juno = "juno" +KBUILD_DEFCONFIG:juno = "defconfig" +KCONFIG_MODE:juno = "--alldefconfig" +FILESEXTRAPATHS:prepend:juno := "${ARMBSPFILESPATHS}" +SRC_URI:append:juno = " file://juno-dts-mhu-doorbell.patch" + +# +# Musca B1/S2 can't run Linux +# +COMPATIBLE_MACHINE:musca-b1 = "(^$)" +COMPATIBLE_MACHINE:musca-s1 = "(^$)" + +# +# N1SDP KMACHINE +# +FILESEXTRAPATHS:prepend:n1sdp := "${THISDIR}/linux-yocto-5.15/n1sdp:" +COMPATIBLE_MACHINE:n1sdp = "n1sdp" +KBUILD_DEFCONFIG:n1sdp = "defconfig" +KCONFIG_MODE:n1sdp = "--alldefconfig" +FILESEXTRAPATHS:prepend:n1sdp := "${ARMBSPFILESPATHS}" +SRC_URI:append:n1sdp = " \ + file://0001-iommu-arm-smmu-v3-workaround-for-ATC_INV_SIZE_ALL-in.patch \ + file://0002-n1sdp-pci_quirk-add-acs-override-for-PCI-devices.patch \ + 
file://0003-pcie-Add-quirk-for-the-Arm-Neoverse-N1SDP-platform.patch \ + file://0004-n1sdp-pcie-add-quirk-support-enabling-remote-chip-PC.patch \ + file://0005-arm64-kpti-Whitelist-early-Arm-Neoverse-N1-revisions.patch \ + file://enable-realtek-R8169.cfg \ + file://enable-usb_conn_gpio.cfg \ + file://usb_xhci_pci_renesas.cfg \ + " +# Since we use the intree defconfig and the preempt-rt turns off some configs +# do_kernel_configcheck will display warnings. So, lets disable it. +KCONF_AUDIT_LEVEL:n1sdp:pn-linux-yocto-rt = "0" + +# +# SGI575 KMACHINE +# +COMPATIBLE_MACHINE:sgi575 = "sgi575" +KBUILD_DEFCONFIG:sgi575 = "defconfig" +KCONFIG_MODE:sgi575 = "--alldefconfig" + +# +# Total Compute (TC0/TC1) KMACHINE +# +COMPATIBLE_MACHINE:tc = "(tc0|tc1)" +KCONFIG_MODE:tc = "--alldefconfig" +FILESEXTRAPATHS:prepend:tc := "${ARMBSPFILESPATHS}:${THISDIR}/linux-arm64-ack-5.10/tc:" +SRC_URI:append:tc = " \ + file://gki_defconfig \ + file://0001-drm-Add-component-aware-simple-encoder.patch \ + file://0002-drm-arm-komeda-add-RENDER-capability-to-the-device-n.patch \ + file://0003-dt-bindings-mailbox-arm-mhuv2-Add-bindings.patch \ + file://0004-mailbox-arm_mhuv2-Add-driver.patch \ + file://0005-mailbox-arm_mhuv2-Fix-sparse-warnings.patch \ + file://0006-mailbox-arm_mhuv2-make-remove-callback-return-void.patch \ + file://0007-mailbox-arm_mhuv2-Skip-calling-kfree-with-invalid-po.patch \ + file://0008-firmware-arm_ffa-Backport-of-arm_ffa-driver.patch \ + file://0009-tee-add-sec_world_id-to-struct-tee_shm.patch \ + file://0010-optee-simplify-optee_release.patch \ + file://0011-optee-sync-OP-TEE-headers.patch \ + file://0012-optee-refactor-driver-with-internal-callbacks.patch \ + file://0013-optee-add-a-FF-A-memory-pool.patch \ + file://0014-optee-add-FF-A-support.patch \ + file://0015-coresight-etm4x-Save-restore-TRFCR_EL1.patch \ + file://0016-coresight-etm4x-Use-Trace-Filtering-controls-dynamic.patch \ + file://0017-perf-arm-cmn-Use-irq_set_affinity.patch \ + 
file://0018-perf-arm-cmn-Fix-CPU-hotplug-unregistration.patch \ + file://0019-perf-arm-cmn-Account-for-NUMA-affinity.patch \ + file://0020-perf-arm-cmn-Drop-compile-test-restriction.patch \ + file://0021-perf-arm-cmn-Refactor-node-ID-handling.patch \ + file://0022-perf-arm-cmn-Streamline-node-iteration.patch \ + file://0023-drivers-perf-arm-cmn-Add-space-after.patch \ + file://0024-perf-arm-cmn-Refactor-DTM-handling.patch \ + file://0025-perf-arm-cmn-Optimise-DTM-counter-reads.patch \ + file://0026-perf-arm-cmn-Optimise-DTC-counter-accesses.patch \ + file://0027-perf-arm-cmn-Move-group-validation-data-off-stack.patch \ + file://0028-perf-arm-cmn-Demarcate-CMN-600-specifics.patch \ + file://0029-perf-arm-cmn-Support-new-IP-features.patch \ + file://0030-perf-arm-cmn-Add-CI-700-Support.patch \ + file://0031-firmware-arm_ffa-Fix-uuid-argument-passed-to-ffa_par.patch \ + file://0032-firmware-arm_ffa-Add-ffa_dev_get_drvdata.patch \ + file://0033-firmware-arm_ffa-extern-ffa_bus_type.patch \ + file://0034-firmware-arm_ffa-Fix-FFA_MEM_SHARE-and-FFA_MEM_FRAG_.patch \ + file://0035-ANDROID-trusty-Backport-of-trusty-driver.patch \ + file://0036-ANDROID-trusty-Remove-FFA-specific-initilization.patch \ + file://0037-ANDROID-trusty-Rename-transfer-memory-function-to-le.patch \ + file://0038-ANDROID-trusty-Separate-out-SMC-based-transport.patch \ + file://0039-ANDROID-trusty-Modify-device-compatible-string.patch \ + file://0040-ANDROID-trusty-Add-transport-descriptor.patch \ + file://0041-ANDROID-trusty-Add-trusty-ffa-driver.patch \ + file://0042-ANDROID-trusty-ffa-Add-support-for-FFA-memory-operat.patch \ + file://0043-ANDROID-trusty-ffa-Enable-FFA-transport-for-both-mem.patch \ + file://0044-ANDROID-trusty-Make-trusty-transports-configurable.patch \ + " +KERNEL_FEATURES:append:tc = " bsp/arm-platforms/tc.scc" +KERNEL_FEATURES:append:tc1 = " bsp/arm-platforms/tc-autofdo.scc" diff --git 
a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0001-drm-Add-component-aware-simple-encoder.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0001-drm-Add-component-aware-simple-encoder.patch new file mode 100644 index 0000000000..158603495a --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0001-drm-Add-component-aware-simple-encoder.patch @@ -0,0 +1,365 @@ +From 39e6b51150c36dd659b85de0c4339594da389da9 Mon Sep 17 00:00:00 2001 +From: Tushar Khandelwal <tushar.khandelwal@arm.com> +Date: Tue, 16 Jun 2020 12:39:06 +0000 +Subject: [PATCH 01/22] drm: Add component-aware simple encoder + +This is a simple DRM encoder that gets its connector timings information +from a OF subnode in the device tree and exposes that as a "discovered" +panel. It can be used together with component-based DRM drivers in an +emulated environment where no real encoder or connector hardware exists +and the display output is configured outside the kernel. + +Signed-off-by: Tushar Khandelwal <tushar.khandelwal@arm.com> + +Upstream-Status: Backport [https://git.linaro.org/landing-teams/working/arm/kernel-release.git/commit/?h=latest-armlt&id=15283f7be4b1e586702551e85b4caf06531ac2fc] +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +--- + drivers/gpu/drm/Kconfig | 11 + + drivers/gpu/drm/Makefile | 2 + + drivers/gpu/drm/drm_virtual_encoder.c | 299 ++++++++++++++++++++++++++ + 3 files changed, 312 insertions(+) + create mode 100644 drivers/gpu/drm/drm_virtual_encoder.c + +diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig +index ca868271f4c4..6ae8ba3ca7b3 100644 +--- a/drivers/gpu/drm/Kconfig ++++ b/drivers/gpu/drm/Kconfig +@@ -300,6 +300,17 @@ config DRM_VKMS + + If M is selected the module will be called vkms. 
+ ++config DRM_VIRT_ENCODER ++ tristate "Virtual OF-based encoder" ++ depends on DRM && OF ++ select VIDEOMODE_HELPERS ++ help ++ Choose this option to get a virtual encoder and its associated ++ connector that will use the device tree to read the display ++ timings information. If M is selected the module will be called ++ drm_vencoder. ++ ++ + source "drivers/gpu/drm/exynos/Kconfig" + + source "drivers/gpu/drm/rockchip/Kconfig" +diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile +index 81569009f884..a3429152c613 100644 +--- a/drivers/gpu/drm/Makefile ++++ b/drivers/gpu/drm/Makefile +@@ -56,6 +56,8 @@ drm_kms_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o + + obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o + obj-$(CONFIG_DRM_DEBUG_SELFTEST) += selftests/ ++drm_vencoder-y := drm_virtual_encoder.o ++obj-$(CONFIG_DRM_VIRT_ENCODER) += drm_vencoder.o + + obj-$(CONFIG_DRM) += drm.o + obj-$(CONFIG_DRM_MIPI_DBI) += drm_mipi_dbi.o +diff --git a/drivers/gpu/drm/drm_virtual_encoder.c b/drivers/gpu/drm/drm_virtual_encoder.c +new file mode 100644 +index 000000000000..2f65c6b47d00 +--- /dev/null ++++ b/drivers/gpu/drm/drm_virtual_encoder.c +@@ -0,0 +1,299 @@ ++/* ++ * Copyright (C) 2016 ARM Limited ++ * Author: Liviu Dudau <Liviu.Dudau@arm.com> ++ * ++ * Dummy encoder and connector that use the OF to "discover" the attached ++ * display timings. Can be used in situations where the encoder and connector's ++ * functionality are emulated and no setup steps are needed, or to describe ++ * attached panels for which no driver exists but can be used without ++ * additional hardware setup. ++ * ++ * The encoder also uses the component framework so that it can be a quick ++ * replacement for existing drivers when testing in an emulated environment. ++ * ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file COPYING in the main directory of this archive ++ * for more details. 
++ * ++ */ ++ ++#include <drm/drm_crtc.h> ++#include <drm/drm_atomic_helper.h> ++#include <drm/drm_crtc_helper.h> ++#include <drm/drm_probe_helper.h> ++#include <drm/drm_print.h> ++#include <linux/platform_device.h> ++#include <drm/drm_of.h> ++#include <linux/component.h> ++#include <video/display_timing.h> ++#include <video/of_display_timing.h> ++#include <video/videomode.h> ++ ++struct drm_virt_priv { ++ struct drm_connector connector; ++ struct drm_encoder encoder; ++ struct display_timings *timings; ++}; ++ ++#define connector_to_drm_virt_priv(x) \ ++ container_of(x, struct drm_virt_priv, connector) ++ ++#define encoder_to_drm_virt_priv(x) \ ++ container_of(x, struct drm_virt_priv, encoder) ++ ++static void drm_virtcon_destroy(struct drm_connector *connector) ++{ ++ struct drm_virt_priv *conn = connector_to_drm_virt_priv(connector); ++ ++ drm_connector_cleanup(connector); ++ display_timings_release(conn->timings); ++} ++ ++static enum drm_connector_status ++drm_virtcon_detect(struct drm_connector *connector, bool force) ++{ ++ return connector_status_connected; ++} ++ ++static const struct drm_connector_funcs drm_virtcon_funcs = { ++ .reset = drm_atomic_helper_connector_reset, ++ .detect = drm_virtcon_detect, ++ .fill_modes = drm_helper_probe_single_connector_modes, ++ .destroy = drm_virtcon_destroy, ++ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, ++ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, ++}; ++ ++static int drm_virtcon_get_modes(struct drm_connector *connector) ++{ ++ struct drm_virt_priv *conn = connector_to_drm_virt_priv(connector); ++ struct display_timings *timings = conn->timings; ++ int i; ++ ++ for (i = 0; i < timings->num_timings; i++) { ++ struct drm_display_mode *mode = drm_mode_create(connector->dev); ++ struct videomode vm; ++ ++ if (videomode_from_timings(timings, &vm, i)) ++ break; ++ ++ drm_display_mode_from_videomode(&vm, mode); ++ mode->type = DRM_MODE_TYPE_DRIVER; ++ if 
(timings->native_mode == i) ++ mode->type = DRM_MODE_TYPE_PREFERRED; ++ ++ drm_mode_set_name(mode); ++ drm_mode_probed_add(connector, mode); ++ } ++ ++ return i; ++} ++ ++static int drm_virtcon_mode_valid(struct drm_connector *connector, ++ struct drm_display_mode *mode) ++{ ++ return MODE_OK; ++} ++ ++struct drm_encoder *drm_virtcon_best_encoder(struct drm_connector *connector) ++{ ++ struct drm_virt_priv *priv = connector_to_drm_virt_priv(connector); ++ ++ return &priv->encoder; ++} ++ ++struct drm_encoder * ++drm_virtcon_atomic_best_encoder(struct drm_connector *connector, ++ struct drm_connector_state *connector_state) ++{ ++ struct drm_virt_priv *priv = connector_to_drm_virt_priv(connector); ++ ++ return &priv->encoder; ++} ++ ++static const struct drm_connector_helper_funcs drm_virtcon_helper_funcs = { ++ .get_modes = drm_virtcon_get_modes, ++ .mode_valid = drm_virtcon_mode_valid, ++ .best_encoder = drm_virtcon_best_encoder, ++ .atomic_best_encoder = drm_virtcon_atomic_best_encoder, ++}; ++ ++static void drm_vencoder_destroy(struct drm_encoder *encoder) ++{ ++ drm_encoder_cleanup(encoder); ++} ++ ++static const struct drm_encoder_funcs drm_vencoder_funcs = { ++ .destroy = drm_vencoder_destroy, ++}; ++ ++static void drm_vencoder_dpms(struct drm_encoder *encoder, int mode) ++{ ++ /* nothing needed */ ++} ++ ++static bool drm_vencoder_mode_fixup(struct drm_encoder *encoder, ++ const struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ /* nothing needed */ ++ return true; ++} ++ ++static void drm_vencoder_prepare(struct drm_encoder *encoder) ++{ ++ drm_vencoder_dpms(encoder, DRM_MODE_DPMS_OFF); ++} ++ ++static void drm_vencoder_commit(struct drm_encoder *encoder) ++{ ++ drm_vencoder_dpms(encoder, DRM_MODE_DPMS_ON); ++} ++ ++static void drm_vencoder_mode_set(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ /* nothing needed */ ++} ++ ++static const struct 
drm_encoder_helper_funcs drm_vencoder_helper_funcs = { ++ .dpms = drm_vencoder_dpms, ++ .mode_fixup = drm_vencoder_mode_fixup, ++ .prepare = drm_vencoder_prepare, ++ .commit = drm_vencoder_commit, ++ .mode_set = drm_vencoder_mode_set, ++}; ++ ++static int drm_vencoder_bind(struct device *dev, struct device *master, ++ void *data) ++{ ++ struct drm_encoder *encoder; ++ struct drm_virt_priv *con; ++ struct drm_connector *connector; ++ struct drm_device *drm = data; ++ u32 crtcs = 0; ++ int ret; ++ ++ con = devm_kzalloc(dev, sizeof(*con), GFP_KERNEL); ++ if (!con) ++ return -ENOMEM; ++ ++ dev_set_drvdata(dev, con); ++ connector = &con->connector; ++ encoder = &con->encoder; ++ ++ if (dev->of_node) { ++ struct drm_bridge *bridge; ++ crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); ++ bridge = of_drm_find_bridge(dev->of_node); ++ if (bridge) { ++ ret = drm_bridge_attach(encoder, bridge, NULL, 0); ++ if (ret) { ++ DRM_ERROR("Failed to initialize bridge\n"); ++ return ret; ++ } ++ } ++ con->timings = of_get_display_timings(dev->of_node); ++ if (!con->timings) { ++ dev_err(dev, "failed to get display panel timings\n"); ++ return ENXIO; ++ } ++ } ++ ++ /* If no CRTCs were found, fall back to the old encoder's behaviour */ ++ if (crtcs == 0) { ++ dev_warn(dev, "Falling back to first CRTC\n"); ++ crtcs = 1 << 0; ++ } ++ ++ encoder->possible_crtcs = crtcs ? 
crtcs : 1; ++ encoder->possible_clones = 0; ++ ++ ret = drm_encoder_init(drm, encoder, &drm_vencoder_funcs, ++ DRM_MODE_ENCODER_VIRTUAL, NULL); ++ if (ret) ++ goto encoder_init_err; ++ ++ drm_encoder_helper_add(encoder, &drm_vencoder_helper_funcs); ++ ++ /* bogus values, pretend we're a 24" screen for DPI calculations */ ++ connector->display_info.width_mm = 519; ++ connector->display_info.height_mm = 324; ++ connector->interlace_allowed = false; ++ connector->doublescan_allowed = false; ++ connector->polled = 0; ++ ++ ret = drm_connector_init(drm, connector, &drm_virtcon_funcs, ++ DRM_MODE_CONNECTOR_VIRTUAL); ++ if (ret) ++ goto connector_init_err; ++ ++ drm_connector_helper_add(connector, &drm_virtcon_helper_funcs); ++ ++ drm_connector_register(connector); ++ ++ ret = drm_connector_attach_encoder(connector, encoder); ++ if (ret) ++ goto attach_err; ++ ++ return ret; ++ ++attach_err: ++ drm_connector_unregister(connector); ++ drm_connector_cleanup(connector); ++connector_init_err: ++ drm_encoder_cleanup(encoder); ++encoder_init_err: ++ display_timings_release(con->timings); ++ ++ return ret; ++}; ++ ++static void drm_vencoder_unbind(struct device *dev, struct device *master, ++ void *data) ++{ ++ struct drm_virt_priv *con = dev_get_drvdata(dev); ++ ++ drm_connector_unregister(&con->connector); ++ drm_connector_cleanup(&con->connector); ++ drm_encoder_cleanup(&con->encoder); ++ display_timings_release(con->timings); ++} ++ ++static const struct component_ops drm_vencoder_ops = { ++ .bind = drm_vencoder_bind, ++ .unbind = drm_vencoder_unbind, ++}; ++ ++static int drm_vencoder_probe(struct platform_device *pdev) ++{ ++ return component_add(&pdev->dev, &drm_vencoder_ops); ++} ++ ++static int drm_vencoder_remove(struct platform_device *pdev) ++{ ++ component_del(&pdev->dev, &drm_vencoder_ops); ++ return 0; ++} ++ ++static const struct of_device_id drm_vencoder_of_match[] = { ++ { .compatible = "drm,virtual-encoder", }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, 
drm_vencoder_of_match); ++ ++static struct platform_driver drm_vencoder_driver = { ++ .probe = drm_vencoder_probe, ++ .remove = drm_vencoder_remove, ++ .driver = { ++ .name = "drm_vencoder", ++ .of_match_table = drm_vencoder_of_match, ++ }, ++}; ++ ++module_platform_driver(drm_vencoder_driver); ++ ++MODULE_AUTHOR("Liviu Dudau"); ++MODULE_DESCRIPTION("Virtual DRM Encoder"); ++MODULE_LICENSE("GPL v2"); +-- +2.17.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0002-drm-arm-komeda-add-RENDER-capability-to-the-device-n.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0002-drm-arm-komeda-add-RENDER-capability-to-the-device-n.patch new file mode 100644 index 0000000000..77519f14aa --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0002-drm-arm-komeda-add-RENDER-capability-to-the-device-n.patch @@ -0,0 +1,32 @@ +From 1b2c200673b4a08324f3a6575b30bd16030ed586 Mon Sep 17 00:00:00 2001 +From: Tushar Khandelwal <tushar.khandelwal@arm.com> +Date: Wed, 17 Jun 2020 10:49:26 +0000 +Subject: [PATCH 02/22] drm: arm: komeda: add RENDER capability to the device + node + +this is required to make this driver work with android framework + +Signed-off-by: Tushar Khandelwal <tushar.khandelwal@arm.com> + +Upstream-Status: Inappropriate [Product specific configuration] +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +--- + drivers/gpu/drm/arm/display/komeda/komeda_kms.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +index 1f6682032ca4..9d1a1942e673 100644 +--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c ++++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +@@ -59,7 +59,7 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data) + } + + static struct drm_driver komeda_kms_driver = { +- .driver_features = DRIVER_GEM | DRIVER_MODESET | 
DRIVER_ATOMIC, ++ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_RENDER, + .lastclose = drm_fb_helper_lastclose, + DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_cma_dumb_create), + .fops = &komeda_cma_fops, +-- +2.17.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0003-dt-bindings-mailbox-arm-mhuv2-Add-bindings.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0003-dt-bindings-mailbox-arm-mhuv2-Add-bindings.patch new file mode 100644 index 0000000000..1da75ea84e --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0003-dt-bindings-mailbox-arm-mhuv2-Add-bindings.patch @@ -0,0 +1,241 @@ +From cdda49168d42c897574388356555f8130c021bb5 Mon Sep 17 00:00:00 2001 +From: Viresh Kumar <viresh.kumar@linaro.org> +Date: Tue, 17 Nov 2020 15:32:05 +0530 +Subject: [PATCH 03/22] dt-bindings: mailbox : arm,mhuv2: Add bindings + +This patch adds device tree binding for ARM Message Handling Unit (MHU) +controller version 2. + +Based on earlier work by Morten Borup Petersen. 
+ +Reviewed-by: Rob Herring <robh@kernel.org> +Co-developed-by: Tushar Khandelwal <tushar.khandelwal@arm.com> +Signed-off-by: Tushar Khandelwal <tushar.khandelwal@arm.com> +Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org> +Signed-off-by: Jassi Brar <jaswinder.singh@linaro.org> + +Upstream-Status: Backport [https://lkml.org/lkml/2020/11/17/234] +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +--- + .../bindings/mailbox/arm,mhuv2.yaml | 209 ++++++++++++++++++ + 1 file changed, 209 insertions(+) + create mode 100644 Documentation/devicetree/bindings/mailbox/arm,mhuv2.yaml + +diff --git a/Documentation/devicetree/bindings/mailbox/arm,mhuv2.yaml b/Documentation/devicetree/bindings/mailbox/arm,mhuv2.yaml +new file mode 100644 +index 000000000000..6608545ea66f +--- /dev/null ++++ b/Documentation/devicetree/bindings/mailbox/arm,mhuv2.yaml +@@ -0,0 +1,209 @@ ++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/mailbox/arm,mhuv2.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: ARM MHUv2 Mailbox Controller ++ ++maintainers: ++ - Tushar Khandelwal <tushar.khandelwal@arm.com> ++ - Viresh Kumar <viresh.kumar@linaro.org> ++ ++description: | ++ The Arm Message Handling Unit (MHU) Version 2 is a mailbox controller that has ++ between 1 and 124 channel windows (each 32-bit wide) to provide unidirectional ++ communication with remote processor(s), where the number of channel windows ++ are implementation dependent. ++ ++ Given the unidirectional nature of the controller, an MHUv2 mailbox may only ++ be written to or read from. If a pair of MHU controllers is implemented ++ between two processing elements to provide bidirectional communication, these ++ must be specified as two separate mailboxes. ++ ++ If the interrupts property is present in device tree node, then its treated as ++ a "receiver" mailbox, otherwise a "sender". 
++ ++ An MHU controller must be specified along with the supported transport ++ protocols. The transport protocols determine the method of data transmission ++ as well as the number of provided mailbox channels. ++ ++ Following are the possible transport protocols. ++ ++ - Data-transfer: Each transfer is made of one or more words, using one or more ++ channel windows. ++ ++ - Doorbell: Each transfer is made up of single bit flag, using any one of the ++ bits in a channel window. A channel window can support up to 32 doorbells ++ and the entire window shall be used in doorbell protocol. Optionally, data ++ may be transmitted through a shared memory region, wherein the MHU is used ++ strictly as an interrupt generation mechanism but that is out of the scope ++ of these bindings. ++ ++# We need a select here so we don't match all nodes with 'arm,primecell' ++select: ++ properties: ++ compatible: ++ contains: ++ enum: ++ - arm,mhuv2-tx ++ - arm,mhuv2-rx ++ required: ++ - compatible ++ ++properties: ++ compatible: ++ oneOf: ++ - description: Sender mode ++ items: ++ - const: arm,mhuv2-tx ++ - const: arm,primecell ++ ++ - description: Receiver-mode ++ items: ++ - const: arm,mhuv2-rx ++ - const: arm,primecell ++ ++ reg: ++ maxItems: 1 ++ ++ interrupts: ++ description: | ++ The MHUv2 controller always implements an interrupt in the "receiver" ++ mode, while the interrupt in the "sender" mode was not available in the ++ version MHUv2.0, but the later versions do have it. ++ maxItems: 1 ++ ++ clocks: ++ maxItems: 1 ++ ++ clock-names: ++ maxItems: 1 ++ ++ arm,mhuv2-protocols: ++ $ref: /schemas/types.yaml#/definitions/uint32-matrix ++ description: | ++ The MHUv2 controller may contain up to 124 channel windows (each 32-bit ++ wide). The hardware and the DT bindings allows any combination of those to ++ be used for various transport protocols. ++ ++ This property allows a platform to describe how these channel windows are ++ used in various transport protocols. 
The entries in this property shall be ++ present as an array of tuples, where each tuple describes details about ++ one of the transport protocol being implemented over some channel ++ window(s). ++ ++ The first field of a tuple signifies the transfer protocol, 0 is reserved ++ for doorbell protocol, and 1 is reserved for data-transfer protocol. ++ Using any other value in the first field of a tuple makes it invalid. ++ ++ The second field of a tuple signifies the number of channel windows where ++ the protocol would be used and should be set to a non zero value. For ++ doorbell protocol this field signifies the number of 32-bit channel ++ windows that implement the doorbell protocol. For data-transfer protocol, ++ this field signifies the number of 32-bit channel windows that implement ++ the data-transfer protocol. ++ ++ The total number of channel windows specified here shouldn't be more than ++ the ones implemented by the platform, though one can specify lesser number ++ of windows here than what the platform implements. ++ ++ mhu: mailbox@2b1f0000 { ++ ... ++ ++ arm,mhuv2-protocols = <0 2>, <1 1>, <1 5>, <1 7>; ++ } ++ ++ The above example defines the protocols of an ARM MHUv2 mailbox ++ controller, where a total of 15 channel windows are used. The first two ++ windows are used in doorbell protocol (64 doorbells), followed by 1, 5 and ++ 7 windows (separately) used in data-transfer protocol. ++ ++ minItems: 1 ++ maxItems: 124 ++ items: ++ items: ++ - enum: [ 0, 1 ] ++ - minimum: 0 ++ maximum: 124 ++ ++ ++ '#mbox-cells': ++ description: | ++ It is always set to 2. The first argument in the consumers 'mboxes' ++ property represents the channel window group, which may be used in ++ doorbell, or data-transfer protocol, and the second argument (only ++ relevant in doorbell protocol, should be 0 otherwise) represents the ++ doorbell number within the 32 bit wide channel window. 
++ ++ From the example given above for arm,mhuv2-protocols, here is how a client ++ node can reference them. ++ ++ mboxes = <&mhu 0 5>; // Channel Window Group 0, doorbell 5. ++ mboxes = <&mhu 1 7>; // Channel Window Group 1, doorbell 7. ++ mboxes = <&mhu 2 0>; // Channel Window Group 2, data transfer protocol with 1 window. ++ mboxes = <&mhu 3 0>; // Channel Window Group 3, data transfer protocol with 5 windows. ++ mboxes = <&mhu 4 0>; // Channel Window Group 4, data transfer protocol with 7 windows. ++ ++ const: 2 ++ ++if: ++ # Interrupt is compulsory for receiver ++ properties: ++ compatible: ++ contains: ++ const: arm,mhuv2-rx ++then: ++ required: ++ - interrupts ++ ++required: ++ - compatible ++ - reg ++ - '#mbox-cells' ++ - arm,mhuv2-protocols ++ ++additionalProperties: false ++ ++examples: ++ # Multiple transport protocols implemented by the mailbox controllers ++ - | ++ soc { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ mhu_tx: mailbox@2b1f0000 { ++ #mbox-cells = <2>; ++ compatible = "arm,mhuv2-tx", "arm,primecell"; ++ reg = <0 0x2b1f0000 0 0x1000>; ++ clocks = <&clock 0>; ++ clock-names = "apb_pclk"; ++ interrupts = <0 45 4>; ++ arm,mhuv2-protocols = <1 5>, <1 2>, <1 5>, <1 7>, <0 2>; ++ }; ++ ++ mhu_rx: mailbox@2b1f1000 { ++ #mbox-cells = <2>; ++ compatible = "arm,mhuv2-rx", "arm,primecell"; ++ reg = <0 0x2b1f1000 0 0x1000>; ++ clocks = <&clock 0>; ++ clock-names = "apb_pclk"; ++ interrupts = <0 46 4>; ++ arm,mhuv2-protocols = <1 1>, <1 7>, <0 2>; ++ }; ++ ++ mhu_client: scb@2e000000 { ++ compatible = "fujitsu,mb86s70-scb-1.0"; ++ reg = <0 0x2e000000 0 0x4000>; ++ ++ mboxes = ++ //data-transfer protocol with 5 windows, mhu-tx ++ <&mhu_tx 2 0>, ++ //data-transfer protocol with 7 windows, mhu-tx ++ <&mhu_tx 3 0>, ++ //doorbell protocol channel 4, doorbell 27, mhu-tx ++ <&mhu_tx 4 27>, ++ //data-transfer protocol with 1 window, mhu-rx ++ <&mhu_rx 0 0>; ++ }; ++ }; +-- +2.17.1 + diff --git 
a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0004-mailbox-arm_mhuv2-Add-driver.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0004-mailbox-arm_mhuv2-Add-driver.patch new file mode 100644 index 0000000000..a4dd9612a1 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0004-mailbox-arm_mhuv2-Add-driver.patch @@ -0,0 +1,1256 @@ +From eadd9235d084da8022df2d232c90590f2160e433 Mon Sep 17 00:00:00 2001 +From: Viresh Kumar <viresh.kumar@linaro.org> +Date: Tue, 17 Nov 2020 15:32:06 +0530 +Subject: [PATCH 04/22] mailbox: arm_mhuv2: Add driver + +This adds driver for the ARM MHUv2 (Message Handling Unit) mailbox +controller. + +This is based on the accepted DT bindings of the controller and supports +combination of both transport protocols, i.e. doorbell and data-transfer. + +Transmitting and receiving data through the mailbox framework is done +through struct arm_mhuv2_mbox_msg. + +Based on the initial work done by Morten Borup Petersen from ARM. 
+ +Co-developed-by: Tushar Khandelwal <tushar.khandelwal@arm.com> +Signed-off-by: Tushar Khandelwal <tushar.khandelwal@arm.com> +Tested-by: Usama Arif <usama.arif@arm.com> +Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org> +Signed-off-by: Jassi Brar <jaswinder.singh@linaro.org> + +Upstream-Status: Backport [https://www.lkml.org/lkml/2020/11/17/235] +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +--- + MAINTAINERS | 9 + + drivers/mailbox/Kconfig | 7 + + drivers/mailbox/Makefile | 2 + + drivers/mailbox/arm_mhuv2.c | 1136 +++++++++++++++++++++ + include/linux/mailbox/arm_mhuv2_message.h | 20 + + 5 files changed, 1174 insertions(+) + create mode 100644 drivers/mailbox/arm_mhuv2.c + create mode 100644 include/linux/mailbox/arm_mhuv2_message.h + +diff --git a/MAINTAINERS b/MAINTAINERS +index 354831907474..5234423c477a 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -10459,6 +10459,15 @@ F: drivers/mailbox/ + F: include/linux/mailbox_client.h + F: include/linux/mailbox_controller.h + ++MAILBOX ARM MHUv2 ++M: Viresh Kumar <viresh.kumar@linaro.org> ++M: Tushar Khandelwal <Tushar.Khandelwal@arm.com> ++L: linux-kernel@vger.kernel.org ++S: Maintained ++F: drivers/mailbox/arm_mhuv2.c ++F: include/linux/mailbox/arm_mhuv2_message.h ++F: Documentation/devicetree/bindings/mailbox/arm,mhuv2.yaml ++ + MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7 + M: Michael Kerrisk <mtk.manpages@gmail.com> + L: linux-man@vger.kernel.org +diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig +index 05b1009e2820..3c0ea96a0a8b 100644 +--- a/drivers/mailbox/Kconfig ++++ b/drivers/mailbox/Kconfig +@@ -16,6 +16,13 @@ config ARM_MHU + The controller has 3 mailbox channels, the last of which can be + used in Secure mode only. + ++config ARM_MHU_V2 ++ tristate "ARM MHUv2 Mailbox" ++ depends on ARM_AMBA ++ help ++ Say Y here if you want to build the ARM MHUv2 controller driver, ++ which provides unidirectional mailboxes between processing elements. 
++ + config IMX_MBOX + tristate "i.MX Mailbox" + depends on ARCH_MXC || COMPILE_TEST +diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile +index 2e06e02b2e03..7194fa92c787 100644 +--- a/drivers/mailbox/Makefile ++++ b/drivers/mailbox/Makefile +@@ -7,6 +7,8 @@ obj-$(CONFIG_MAILBOX_TEST) += mailbox-test.o + + obj-$(CONFIG_ARM_MHU) += arm_mhu.o arm_mhu_db.o + ++obj-$(CONFIG_ARM_MHU_V2) += arm_mhuv2.o ++ + obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o + + obj-$(CONFIG_ARMADA_37XX_RWTM_MBOX) += armada-37xx-rwtm-mailbox.o +diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c +new file mode 100644 +index 000000000000..67fb10885bb4 +--- /dev/null ++++ b/drivers/mailbox/arm_mhuv2.c +@@ -0,0 +1,1136 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * ARM Message Handling Unit Version 2 (MHUv2) driver. ++ * ++ * Copyright (C) 2020 ARM Ltd. ++ * Copyright (C) 2020 Linaro Ltd. ++ * ++ * An MHUv2 mailbox controller can provide up to 124 channel windows (each 32 ++ * bit long) and the driver allows any combination of both the transport ++ * protocol modes: data-transfer and doorbell, to be used on those channel ++ * windows. ++ * ++ * The transport protocols should be specified in the device tree entry for the ++ * device. The transport protocols determine how the underlying hardware ++ * resources of the device are utilized when transmitting data. Refer to the ++ * device tree bindings of the ARM MHUv2 controller for more details. ++ * ++ * The number of registered mailbox channels is dependent on both the underlying ++ * hardware - mainly the number of channel windows implemented by the platform, ++ * as well as the selected transport protocols. ++ * ++ * The MHUv2 controller can work both as a sender and receiver, but the driver ++ * and the DT bindings support unidirectional transfers for better allocation of ++ * the channels. 
That is, this driver will be probed for two separate devices ++ * for each mailbox controller, a sender device and a receiver device. ++ */ ++ ++#include <linux/amba/bus.h> ++#include <linux/interrupt.h> ++#include <linux/mailbox_controller.h> ++#include <linux/mailbox/arm_mhuv2_message.h> ++#include <linux/module.h> ++#include <linux/of_address.h> ++#include <linux/spinlock.h> ++ ++/* ====== MHUv2 Registers ====== */ ++ ++/* Maximum number of channel windows */ ++#define MHUV2_CH_WN_MAX 124 ++/* Number of combined interrupt status registers */ ++#define MHUV2_CMB_INT_ST_REG_CNT 4 ++#define MHUV2_STAT_BYTES (sizeof(u32)) ++#define MHUV2_STAT_BITS (MHUV2_STAT_BYTES * __CHAR_BIT__) ++ ++#define LSB_MASK(n) ((1 << (n * __CHAR_BIT__)) - 1) ++#define MHUV2_PROTOCOL_PROP "arm,mhuv2-protocols" ++ ++/* Register Message Handling Unit Configuration fields */ ++struct mhu_cfg_t { ++ u32 num_ch : 7; ++ u32 pad : 25; ++} __packed; ++ ++/* register Interrupt Status fields */ ++struct int_st_t { ++ u32 nr2r : 1; ++ u32 r2nr : 1; ++ u32 pad : 30; ++} __packed; ++ ++/* Register Interrupt Clear fields */ ++struct int_clr_t { ++ u32 nr2r : 1; ++ u32 r2nr : 1; ++ u32 pad : 30; ++} __packed; ++ ++/* Register Interrupt Enable fields */ ++struct int_en_t { ++ u32 r2nr : 1; ++ u32 nr2r : 1; ++ u32 chcomb : 1; ++ u32 pad : 29; ++} __packed; ++ ++/* Register Implementer Identification fields */ ++struct iidr_t { ++ u32 implementer : 12; ++ u32 revision : 4; ++ u32 variant : 4; ++ u32 product_id : 12; ++} __packed; ++ ++/* Register Architecture Identification Register fields */ ++struct aidr_t { ++ u32 arch_minor_rev : 4; ++ u32 arch_major_rev : 4; ++ u32 pad : 24; ++} __packed; ++ ++/* Sender Channel Window fields */ ++struct mhu2_send_ch_wn_reg { ++ u32 stat; ++ u8 pad1[0x0C - 0x04]; ++ u32 stat_set; ++ u32 int_st; ++ u32 int_clr; ++ u32 int_en; ++ u8 pad2[0x20 - 0x1C]; ++} __packed; ++ ++/* Sender frame register fields */ ++struct mhu2_send_frame_reg { ++ struct mhu2_send_ch_wn_reg 
ch_wn[MHUV2_CH_WN_MAX]; ++ struct mhu_cfg_t mhu_cfg; ++ u32 resp_cfg; ++ u32 access_request; ++ u32 access_ready; ++ struct int_st_t int_st; ++ struct int_clr_t int_clr; ++ struct int_en_t int_en; ++ u32 reserved0; ++ u32 chcomb_int_st[MHUV2_CMB_INT_ST_REG_CNT]; ++ u8 pad[0xFC8 - 0xFB0]; ++ struct iidr_t iidr; ++ struct aidr_t aidr; ++} __packed; ++ ++/* Receiver Channel Window fields */ ++struct mhu2_recv_ch_wn_reg { ++ u32 stat; ++ u32 stat_masked; ++ u32 stat_clear; ++ u8 reserved0[0x10 - 0x0C]; ++ u32 mask; ++ u32 mask_set; ++ u32 mask_clear; ++ u8 pad[0x20 - 0x1C]; ++} __packed; ++ ++/* Receiver frame register fields */ ++struct mhu2_recv_frame_reg { ++ struct mhu2_recv_ch_wn_reg ch_wn[MHUV2_CH_WN_MAX]; ++ struct mhu_cfg_t mhu_cfg; ++ u8 reserved0[0xF90 - 0xF84]; ++ struct int_st_t int_st; ++ struct int_clr_t int_clr; ++ struct int_en_t int_en; ++ u32 pad; ++ u32 chcomb_int_st[MHUV2_CMB_INT_ST_REG_CNT]; ++ u8 reserved2[0xFC8 - 0xFB0]; ++ struct iidr_t iidr; ++ struct aidr_t aidr; ++} __packed; ++ ++ ++/* ====== MHUv2 data structures ====== */ ++ ++enum mhuv2_transport_protocol { ++ DOORBELL = 0, ++ DATA_TRANSFER = 1 ++}; ++ ++enum mhuv2_frame { ++ RECEIVER_FRAME, ++ SENDER_FRAME ++}; ++ ++/** ++ * struct mhuv2 - MHUv2 mailbox controller data ++ * ++ * @mbox: Mailbox controller belonging to the MHU frame. ++ * @send/recv: Base address of the register mapping region. ++ * @frame: Frame type: RECEIVER_FRAME or SENDER_FRAME. ++ * @irq: Interrupt. ++ * @windows: Channel windows implemented by the platform. ++ * @minor: Minor version of the controller. ++ * @length: Length of the protocols array in bytes. ++ * @protocols: Raw protocol information, derived from device tree. ++ * @doorbell_pending_lock: spinlock required for correct operation of Tx ++ * interrupt for doorbells. 
++ */ ++struct mhuv2 { ++ struct mbox_controller mbox; ++ union { ++ struct mhu2_send_frame_reg __iomem *send; ++ struct mhu2_recv_frame_reg __iomem *recv; ++ }; ++ enum mhuv2_frame frame; ++ unsigned int irq; ++ unsigned int windows; ++ unsigned int minor; ++ unsigned int length; ++ u32 *protocols; ++ ++ spinlock_t doorbell_pending_lock; ++}; ++ ++#define mhu_from_mbox(_mbox) container_of(_mbox, struct mhuv2, mbox) ++ ++/** ++ * struct mhuv2_protocol_ops - MHUv2 operations ++ * ++ * Each transport protocol must provide an implementation of the operations ++ * provided here. ++ * ++ * @rx_startup: Startup callback for receiver. ++ * @rx_shutdown: Shutdown callback for receiver. ++ * @read_data: Reads and clears newly available data. ++ * @tx_startup: Startup callback for receiver. ++ * @tx_shutdown: Shutdown callback for receiver. ++ * @last_tx_done: Report back if the last tx is completed or not. ++ * @send_data: Send data to the receiver. ++ */ ++struct mhuv2_protocol_ops { ++ int (*rx_startup)(struct mhuv2 *mhu, struct mbox_chan *chan); ++ void (*rx_shutdown)(struct mhuv2 *mhu, struct mbox_chan *chan); ++ void *(*read_data)(struct mhuv2 *mhu, struct mbox_chan *chan); ++ ++ void (*tx_startup)(struct mhuv2 *mhu, struct mbox_chan *chan); ++ void (*tx_shutdown)(struct mhuv2 *mhu, struct mbox_chan *chan); ++ int (*last_tx_done)(struct mhuv2 *mhu, struct mbox_chan *chan); ++ int (*send_data)(struct mhuv2 *mhu, struct mbox_chan *chan, void *arg); ++}; ++ ++/* ++ * MHUv2 mailbox channel's private information ++ * ++ * @ops: protocol specific ops for the channel. ++ * @ch_wn_idx: Channel window index allocated to the channel. ++ * @windows: Total number of windows consumed by the channel, only relevant ++ * in DATA_TRANSFER protocol. ++ * @doorbell: Doorbell bit number within the ch_wn_idx window, only relevant ++ * in DOORBELL protocol. ++ * @pending: Flag indicating pending doorbell interrupt, only relevant in ++ * DOORBELL protocol. 
++ */ ++struct mhuv2_mbox_chan_priv { ++ const struct mhuv2_protocol_ops *ops; ++ u32 ch_wn_idx; ++ union { ++ u32 windows; ++ struct { ++ u32 doorbell; ++ u32 pending; ++ }; ++ }; ++}; ++ ++/* Macro for reading a bitfield within a physically mapped packed struct */ ++#define readl_relaxed_bitfield(_regptr, _field) \ ++ ({ \ ++ u32 _regval; \ ++ _regval = readl_relaxed((_regptr)); \ ++ (*(typeof((_regptr)))(&_regval))._field; \ ++ }) ++ ++/* Macro for writing a bitfield within a physically mapped packed struct */ ++#define writel_relaxed_bitfield(_value, _regptr, _field) \ ++ ({ \ ++ u32 _regval; \ ++ _regval = readl_relaxed(_regptr); \ ++ (*(typeof(_regptr))(&_regval))._field = _value; \ ++ writel_relaxed(_regval, _regptr); \ ++ }) ++ ++ ++/* =================== Doorbell transport protocol operations =============== */ ++ ++static int mhuv2_doorbell_rx_startup(struct mhuv2 *mhu, struct mbox_chan *chan) ++{ ++ struct mhuv2_mbox_chan_priv *priv = chan->con_priv; ++ ++ writel_relaxed(BIT(priv->doorbell), ++ &mhu->recv->ch_wn[priv->ch_wn_idx].mask_clear); ++ return 0; ++} ++ ++static void mhuv2_doorbell_rx_shutdown(struct mhuv2 *mhu, ++ struct mbox_chan *chan) ++{ ++ struct mhuv2_mbox_chan_priv *priv = chan->con_priv; ++ ++ writel_relaxed(BIT(priv->doorbell), ++ &mhu->recv->ch_wn[priv->ch_wn_idx].mask_set); ++} ++ ++static void *mhuv2_doorbell_read_data(struct mhuv2 *mhu, struct mbox_chan *chan) ++{ ++ struct mhuv2_mbox_chan_priv *priv = chan->con_priv; ++ ++ writel_relaxed(BIT(priv->doorbell), ++ &mhu->recv->ch_wn[priv->ch_wn_idx].stat_clear); ++ return NULL; ++} ++ ++static int mhuv2_doorbell_last_tx_done(struct mhuv2 *mhu, ++ struct mbox_chan *chan) ++{ ++ struct mhuv2_mbox_chan_priv *priv = chan->con_priv; ++ ++ return !(readl_relaxed(&mhu->send->ch_wn[priv->ch_wn_idx].stat) & ++ BIT(priv->doorbell)); ++} ++ ++static int mhuv2_doorbell_send_data(struct mhuv2 *mhu, struct mbox_chan *chan, ++ void *arg) ++{ ++ struct mhuv2_mbox_chan_priv *priv = chan->con_priv; ++ 
unsigned long flags; ++ ++ spin_lock_irqsave(&mhu->doorbell_pending_lock, flags); ++ ++ priv->pending = 1; ++ writel_relaxed(BIT(priv->doorbell), ++ &mhu->send->ch_wn[priv->ch_wn_idx].stat_set); ++ ++ spin_unlock_irqrestore(&mhu->doorbell_pending_lock, flags); ++ ++ return 0; ++} ++ ++static const struct mhuv2_protocol_ops mhuv2_doorbell_ops = { ++ .rx_startup = mhuv2_doorbell_rx_startup, ++ .rx_shutdown = mhuv2_doorbell_rx_shutdown, ++ .read_data = mhuv2_doorbell_read_data, ++ .last_tx_done = mhuv2_doorbell_last_tx_done, ++ .send_data = mhuv2_doorbell_send_data, ++}; ++#define IS_PROTOCOL_DOORBELL(_priv) (_priv->ops == &mhuv2_doorbell_ops) ++ ++/* ============= Data transfer transport protocol operations ================ */ ++ ++static int mhuv2_data_transfer_rx_startup(struct mhuv2 *mhu, ++ struct mbox_chan *chan) ++{ ++ struct mhuv2_mbox_chan_priv *priv = chan->con_priv; ++ int i = priv->ch_wn_idx + priv->windows - 1; ++ ++ /* ++ * The protocol mandates that all but the last status register must be ++ * masked. ++ */ ++ writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_clear); ++ return 0; ++} ++ ++static void mhuv2_data_transfer_rx_shutdown(struct mhuv2 *mhu, ++ struct mbox_chan *chan) ++{ ++ struct mhuv2_mbox_chan_priv *priv = chan->con_priv; ++ int i = priv->ch_wn_idx + priv->windows - 1; ++ ++ writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_set); ++} ++ ++static void *mhuv2_data_transfer_read_data(struct mhuv2 *mhu, ++ struct mbox_chan *chan) ++{ ++ struct mhuv2_mbox_chan_priv *priv = chan->con_priv; ++ const int windows = priv->windows; ++ struct arm_mhuv2_mbox_msg *msg; ++ u32 *data; ++ int i, idx; ++ ++ msg = kzalloc(sizeof(*msg) + windows * MHUV2_STAT_BYTES, GFP_KERNEL); ++ if (!msg) ++ return ERR_PTR(-ENOMEM); ++ ++ data = msg->data = msg + 1; ++ msg->len = windows * MHUV2_STAT_BYTES; ++ ++ /* ++ * Messages are expected in order of most significant word to least ++ * significant word. Refer mhuv2_data_transfer_send_data() for more ++ * details. 
++ * ++ * We also need to read the stat register instead of stat_masked, as we ++ * masked all but the last window. ++ * ++ * Last channel window must be cleared as the final operation. Upon ++ * clearing the last channel window register, which is unmasked in ++ * data-transfer protocol, the interrupt is de-asserted. ++ */ ++ for (i = 0; i < windows; i++) { ++ idx = priv->ch_wn_idx + i; ++ data[windows - 1 - i] = readl_relaxed(&mhu->recv->ch_wn[idx].stat); ++ writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[idx].stat_clear); ++ } ++ ++ return msg; ++} ++ ++static void mhuv2_data_transfer_tx_startup(struct mhuv2 *mhu, ++ struct mbox_chan *chan) ++{ ++ struct mhuv2_mbox_chan_priv *priv = chan->con_priv; ++ int i = priv->ch_wn_idx + priv->windows - 1; ++ ++ /* Enable interrupts only for the last window */ ++ if (mhu->minor) { ++ writel_relaxed(0x1, &mhu->send->ch_wn[i].int_clr); ++ writel_relaxed(0x1, &mhu->send->ch_wn[i].int_en); ++ } ++} ++ ++static void mhuv2_data_transfer_tx_shutdown(struct mhuv2 *mhu, ++ struct mbox_chan *chan) ++{ ++ struct mhuv2_mbox_chan_priv *priv = chan->con_priv; ++ int i = priv->ch_wn_idx + priv->windows - 1; ++ ++ if (mhu->minor) ++ writel_relaxed(0x0, &mhu->send->ch_wn[i].int_en); ++} ++ ++static int mhuv2_data_transfer_last_tx_done(struct mhuv2 *mhu, ++ struct mbox_chan *chan) ++{ ++ struct mhuv2_mbox_chan_priv *priv = chan->con_priv; ++ int i = priv->ch_wn_idx + priv->windows - 1; ++ ++ /* Just checking the last channel window should be enough */ ++ return !readl_relaxed(&mhu->send->ch_wn[i].stat); ++} ++ ++/* ++ * Message will be transmitted from most significant to least significant word. ++ * This is to allow for messages shorter than channel windows to still trigger ++ * the receiver interrupt which gets activated when the last stat register is ++ * written. 
As an example, a 6-word message is to be written on a 4-channel MHU ++ * connection: Registers marked with '*' are masked, and will not generate an ++ * interrupt on the receiver side once written. ++ * ++ * u32 *data = [0x00000001], [0x00000002], [0x00000003], [0x00000004], ++ * [0x00000005], [0x00000006] ++ * ++ * ROUND 1: ++ * stat reg To write Write sequence ++ * [ stat 3 ] <- [0x00000001] 4 <- triggers interrupt on receiver ++ * [ stat 2 ] <- [0x00000002] 3 ++ * [ stat 1 ] <- [0x00000003] 2 ++ * [ stat 0 ] <- [0x00000004] 1 ++ * ++ * data += 4 // Increment data pointer by number of stat regs ++ * ++ * ROUND 2: ++ * stat reg To write Write sequence ++ * [ stat 3 ] <- [0x00000005] 2 <- triggers interrupt on receiver ++ * [ stat 2 ] <- [0x00000006] 1 ++ * [ stat 1 ] <- [0x00000000] ++ * [ stat 0 ] <- [0x00000000] ++ */ ++static int mhuv2_data_transfer_send_data(struct mhuv2 *mhu, ++ struct mbox_chan *chan, void *arg) ++{ ++ const struct arm_mhuv2_mbox_msg *msg = arg; ++ int bytes_left = msg->len, bytes_to_send, bytes_in_round, i; ++ struct mhuv2_mbox_chan_priv *priv = chan->con_priv; ++ int windows = priv->windows; ++ u32 *data = msg->data, word; ++ ++ while (bytes_left) { ++ if (!data[0]) { ++ dev_err(mhu->mbox.dev, "Data aligned at first window can't be zero to guarantee interrupt generation at receiver"); ++ return -EINVAL; ++ } ++ ++ while(!mhuv2_data_transfer_last_tx_done(mhu, chan)) ++ continue; ++ ++ bytes_in_round = min(bytes_left, (int)(windows * MHUV2_STAT_BYTES)); ++ ++ for (i = windows - 1; i >= 0; i--) { ++ /* Data less than windows can transfer ? 
*/ ++ if (unlikely(bytes_in_round <= i * MHUV2_STAT_BYTES)) ++ continue; ++ ++ word = data[i]; ++ bytes_to_send = bytes_in_round & (MHUV2_STAT_BYTES - 1); ++ if (unlikely(bytes_to_send)) ++ word &= LSB_MASK(bytes_to_send); ++ else ++ bytes_to_send = MHUV2_STAT_BYTES; ++ ++ writel_relaxed(word, &mhu->send->ch_wn[priv->ch_wn_idx + windows - 1 - i].stat_set); ++ bytes_left -= bytes_to_send; ++ bytes_in_round -= bytes_to_send; ++ } ++ ++ data += windows; ++ } ++ ++ return 0; ++} ++ ++static const struct mhuv2_protocol_ops mhuv2_data_transfer_ops = { ++ .rx_startup = mhuv2_data_transfer_rx_startup, ++ .rx_shutdown = mhuv2_data_transfer_rx_shutdown, ++ .read_data = mhuv2_data_transfer_read_data, ++ .tx_startup = mhuv2_data_transfer_tx_startup, ++ .tx_shutdown = mhuv2_data_transfer_tx_shutdown, ++ .last_tx_done = mhuv2_data_transfer_last_tx_done, ++ .send_data = mhuv2_data_transfer_send_data, ++}; ++ ++/* Interrupt handlers */ ++ ++static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 *reg) ++{ ++ struct mbox_chan *chans = mhu->mbox.chans; ++ int channel = 0, i, offset = 0, windows, protocol, ch_wn; ++ u32 stat; ++ ++ for (i = 0; i < MHUV2_CMB_INT_ST_REG_CNT; i++) { ++ stat = readl_relaxed(reg + i); ++ if (!stat) ++ continue; ++ ++ ch_wn = i * MHUV2_STAT_BITS + __builtin_ctz(stat); ++ ++ for (i = 0; i < mhu->length; i += 2) { ++ protocol = mhu->protocols[i]; ++ windows = mhu->protocols[i + 1]; ++ ++ if (ch_wn >= offset + windows) { ++ if (protocol == DOORBELL) ++ channel += MHUV2_STAT_BITS * windows; ++ else ++ channel++; ++ ++ offset += windows; ++ continue; ++ } ++ ++ /* Return first chan of the window in doorbell mode */ ++ if (protocol == DOORBELL) ++ channel += MHUV2_STAT_BITS * (ch_wn - offset); ++ ++ return &chans[channel]; ++ } ++ } ++ ++ return ERR_PTR(-EIO); ++} ++ ++static irqreturn_t mhuv2_sender_interrupt(int irq, void *data) ++{ ++ struct mhuv2 *mhu = data; ++ struct device *dev = mhu->mbox.dev; ++ struct mhuv2_mbox_chan_priv *priv; ++ struct 
mbox_chan *chan; ++ unsigned long flags; ++ int i, found = 0; ++ u32 stat; ++ ++ chan = get_irq_chan_comb(mhu, mhu->send->chcomb_int_st); ++ if (IS_ERR(chan)) { ++ dev_warn(dev, "Failed to find channel for the Tx interrupt\n"); ++ return IRQ_NONE; ++ } ++ priv = chan->con_priv; ++ ++ if (!IS_PROTOCOL_DOORBELL(priv)) { ++ writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx + priv->windows - 1].int_clr); ++ ++ if (chan->cl) { ++ mbox_chan_txdone(chan, 0); ++ return IRQ_HANDLED; ++ } ++ ++ dev_warn(dev, "Tx interrupt Received on channel (%u) not currently attached to a mailbox client\n", ++ priv->ch_wn_idx); ++ return IRQ_NONE; ++ } ++ ++ /* Clear the interrupt first, so we don't miss any doorbell later */ ++ writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx].int_clr); ++ ++ /* ++ * In Doorbell mode, make sure no new transitions happen while the ++ * interrupt handler is trying to find the finished doorbell tx ++ * operations, else we may think few of the transfers were complete ++ * before they actually were. ++ */ ++ spin_lock_irqsave(&mhu->doorbell_pending_lock, flags); ++ ++ /* ++ * In case of doorbell mode, the first channel of the window is returned ++ * by get_irq_chan_comb(). Find all the pending channels here. 
++ */ ++ stat = readl_relaxed(&mhu->send->ch_wn[priv->ch_wn_idx].stat); ++ ++ for (i = 0; i < MHUV2_STAT_BITS; i++) { ++ priv = chan[i].con_priv; ++ ++ /* Find cases where pending was 1, but stat's bit is cleared */ ++ if (priv->pending ^ ((stat >> i) & 0x1)) { ++ BUG_ON(!priv->pending); ++ ++ if (!chan->cl) { ++ dev_warn(dev, "Tx interrupt received on doorbell (%u : %u) channel not currently attached to a mailbox client\n", ++ priv->ch_wn_idx, i); ++ continue; ++ } ++ ++ mbox_chan_txdone(&chan[i], 0); ++ priv->pending = 0; ++ found++; ++ } ++ } ++ ++ spin_unlock_irqrestore(&mhu->doorbell_pending_lock, flags); ++ ++ if (!found) { ++ /* ++ * We may have already processed the doorbell in the previous ++ * iteration if the interrupt came right after we cleared it but ++ * before we read the stat register. ++ */ ++ dev_dbg(dev, "Couldn't find the doorbell (%u) for the Tx interrupt interrupt\n", ++ priv->ch_wn_idx); ++ return IRQ_NONE; ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static struct mbox_chan *get_irq_chan_comb_rx(struct mhuv2 *mhu) ++{ ++ struct mhuv2_mbox_chan_priv *priv; ++ struct mbox_chan *chan; ++ u32 stat; ++ ++ chan = get_irq_chan_comb(mhu, mhu->recv->chcomb_int_st); ++ if (IS_ERR(chan)) ++ return chan; ++ ++ priv = chan->con_priv; ++ if (!IS_PROTOCOL_DOORBELL(priv)) ++ return chan; ++ ++ /* ++ * In case of doorbell mode, the first channel of the window is returned ++ * by the routine. Find the exact channel here. 
++ */ ++ stat = readl_relaxed(&mhu->recv->ch_wn[priv->ch_wn_idx].stat_masked); ++ BUG_ON(!stat); ++ ++ return chan + __builtin_ctz(stat); ++} ++ ++static struct mbox_chan *get_irq_chan_stat_rx(struct mhuv2 *mhu) ++{ ++ struct mbox_chan *chans = mhu->mbox.chans; ++ struct mhuv2_mbox_chan_priv *priv; ++ u32 stat; ++ int i = 0; ++ ++ while (i < mhu->mbox.num_chans) { ++ priv = chans[i].con_priv; ++ stat = readl_relaxed(&mhu->recv->ch_wn[priv->ch_wn_idx].stat_masked); ++ ++ if (stat) { ++ if (IS_PROTOCOL_DOORBELL(priv)) ++ i += __builtin_ctz(stat); ++ return &chans[i]; ++ } ++ ++ i += IS_PROTOCOL_DOORBELL(priv) ? MHUV2_STAT_BITS : 1; ++ } ++ ++ return ERR_PTR(-EIO); ++} ++ ++static struct mbox_chan *get_irq_chan_rx(struct mhuv2 *mhu) ++{ ++ if (!mhu->minor) ++ return get_irq_chan_stat_rx(mhu); ++ ++ return get_irq_chan_comb_rx(mhu); ++} ++ ++static irqreturn_t mhuv2_receiver_interrupt(int irq, void *arg) ++{ ++ struct mhuv2 *mhu = arg; ++ struct mbox_chan *chan = get_irq_chan_rx(mhu); ++ struct device *dev = mhu->mbox.dev; ++ struct mhuv2_mbox_chan_priv *priv; ++ int ret = IRQ_NONE; ++ void *data; ++ ++ if (IS_ERR(chan)) { ++ dev_warn(dev, "Failed to find channel for the rx interrupt\n"); ++ return IRQ_NONE; ++ } ++ priv = chan->con_priv; ++ ++ /* Read and clear the data first */ ++ data = priv->ops->read_data(mhu, chan); ++ ++ if (!chan->cl) { ++ dev_warn(dev, "Received data on channel (%u) not currently attached to a mailbox client\n", ++ priv->ch_wn_idx); ++ } else if (IS_ERR(data)) { ++ dev_err(dev, "Failed to read data: %lu\n", PTR_ERR(data)); ++ } else { ++ mbox_chan_received_data(chan, data); ++ ret = IRQ_HANDLED; ++ } ++ ++ kfree(data); ++ return ret; ++} ++ ++/* Sender and receiver ops */ ++static bool mhuv2_sender_last_tx_done(struct mbox_chan *chan) ++{ ++ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox); ++ struct mhuv2_mbox_chan_priv *priv = chan->con_priv; ++ ++ return priv->ops->last_tx_done(mhu, chan); ++} ++ ++static int mhuv2_sender_send_data(struct 
mbox_chan *chan, void *data) ++{ ++ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox); ++ struct mhuv2_mbox_chan_priv *priv = chan->con_priv; ++ ++ if (!priv->ops->last_tx_done(mhu, chan)) ++ return -EBUSY; ++ ++ return priv->ops->send_data(mhu, chan, data); ++} ++ ++static int mhuv2_sender_startup(struct mbox_chan *chan) ++{ ++ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox); ++ struct mhuv2_mbox_chan_priv *priv = chan->con_priv; ++ ++ if (priv->ops->tx_startup) ++ priv->ops->tx_startup(mhu, chan); ++ return 0; ++} ++ ++static void mhuv2_sender_shutdown(struct mbox_chan *chan) ++{ ++ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox); ++ struct mhuv2_mbox_chan_priv *priv = chan->con_priv; ++ ++ if (priv->ops->tx_shutdown) ++ priv->ops->tx_shutdown(mhu, chan); ++} ++ ++static const struct mbox_chan_ops mhuv2_sender_ops = { ++ .send_data = mhuv2_sender_send_data, ++ .startup = mhuv2_sender_startup, ++ .shutdown = mhuv2_sender_shutdown, ++ .last_tx_done = mhuv2_sender_last_tx_done, ++}; ++ ++static int mhuv2_receiver_startup(struct mbox_chan *chan) ++{ ++ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox); ++ struct mhuv2_mbox_chan_priv *priv = chan->con_priv; ++ ++ return priv->ops->rx_startup(mhu, chan); ++} ++ ++static void mhuv2_receiver_shutdown(struct mbox_chan *chan) ++{ ++ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox); ++ struct mhuv2_mbox_chan_priv *priv = chan->con_priv; ++ ++ priv->ops->rx_shutdown(mhu, chan); ++} ++ ++static int mhuv2_receiver_send_data(struct mbox_chan *chan, void *data) ++{ ++ dev_err(chan->mbox->dev, ++ "Trying to transmit on a receiver MHU frame\n"); ++ return -EIO; ++} ++ ++static bool mhuv2_receiver_last_tx_done(struct mbox_chan *chan) ++{ ++ dev_err(chan->mbox->dev, "Trying to Tx poll on a receiver MHU frame\n"); ++ return true; ++} ++ ++static const struct mbox_chan_ops mhuv2_receiver_ops = { ++ .send_data = mhuv2_receiver_send_data, ++ .startup = mhuv2_receiver_startup, ++ .shutdown = mhuv2_receiver_shutdown, ++ .last_tx_done = 
mhuv2_receiver_last_tx_done, ++}; ++ ++static struct mbox_chan *mhuv2_mbox_of_xlate(struct mbox_controller *mbox, ++ const struct of_phandle_args *pa) ++{ ++ struct mhuv2 *mhu = mhu_from_mbox(mbox); ++ struct mbox_chan *chans = mbox->chans; ++ int channel = 0, i, offset, doorbell, protocol, windows; ++ ++ if (pa->args_count != 2) ++ return ERR_PTR(-EINVAL); ++ ++ offset = pa->args[0]; ++ doorbell = pa->args[1]; ++ if (doorbell >= MHUV2_STAT_BITS) ++ goto out; ++ ++ for (i = 0; i < mhu->length; i += 2) { ++ protocol = mhu->protocols[i]; ++ windows = mhu->protocols[i + 1]; ++ ++ if (protocol == DOORBELL) { ++ if (offset < windows) ++ return &chans[channel + MHUV2_STAT_BITS * offset + doorbell]; ++ ++ channel += MHUV2_STAT_BITS * windows; ++ offset -= windows; ++ } else { ++ if (offset == 0) { ++ if (doorbell) ++ goto out; ++ ++ return &chans[channel]; ++ } ++ ++ channel++; ++ offset--; ++ } ++ } ++ ++out: ++ dev_err(mbox->dev, "Couldn't xlate to a valid channel (%d: %d)\n", ++ pa->args[0], doorbell); ++ return ERR_PTR(-ENODEV); ++} ++ ++static int mhuv2_verify_protocol(struct mhuv2 *mhu) ++{ ++ struct device *dev = mhu->mbox.dev; ++ int protocol, windows, channels = 0, total_windows = 0, i; ++ ++ for (i = 0; i < mhu->length; i += 2) { ++ protocol = mhu->protocols[i]; ++ windows = mhu->protocols[i + 1]; ++ ++ if (!windows) { ++ dev_err(dev, "Window size can't be zero (%d)\n", i); ++ return -EINVAL; ++ } ++ total_windows += windows; ++ ++ if (protocol == DOORBELL) { ++ channels += MHUV2_STAT_BITS * windows; ++ } else if (protocol == DATA_TRANSFER) { ++ channels++; ++ } else { ++ dev_err(dev, "Invalid protocol (%d) present in %s property at index %d\n", ++ protocol, MHUV2_PROTOCOL_PROP, i); ++ return -EINVAL; ++ } ++ } ++ ++ if (total_windows > mhu->windows) { ++ dev_err(dev, "Channel windows can't be more than what's implemented by the hardware ( %d: %d)\n", ++ total_windows, mhu->windows); ++ return -EINVAL; ++ } ++ ++ mhu->mbox.num_chans = channels; ++ return 0; ++} 
++ ++static int mhuv2_allocate_channels(struct mhuv2 *mhu) ++{ ++ struct mbox_controller *mbox = &mhu->mbox; ++ struct mhuv2_mbox_chan_priv *priv; ++ struct device *dev = mbox->dev; ++ struct mbox_chan *chans; ++ int protocol, windows = 0, next_window = 0, i, j, k; ++ ++ chans = devm_kcalloc(dev, mbox->num_chans, sizeof(*chans), GFP_KERNEL); ++ if (!chans) ++ return -ENOMEM; ++ ++ mbox->chans = chans; ++ ++ for (i = 0; i < mhu->length; i += 2) { ++ next_window += windows; ++ ++ protocol = mhu->protocols[i]; ++ windows = mhu->protocols[i + 1]; ++ ++ if (protocol == DATA_TRANSFER) { ++ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ return -ENOMEM; ++ ++ priv->ch_wn_idx = next_window; ++ priv->ops = &mhuv2_data_transfer_ops; ++ priv->windows = windows; ++ chans++->con_priv = priv; ++ continue; ++ } ++ ++ for (j = 0; j < windows; j++) { ++ for (k = 0; k < MHUV2_STAT_BITS; k++) { ++ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ return -ENOMEM; ++ ++ priv->ch_wn_idx = next_window + j; ++ priv->ops = &mhuv2_doorbell_ops; ++ priv->doorbell = k; ++ chans++->con_priv = priv; ++ } ++ ++ /* ++ * Permanently enable interrupt as we can't ++ * control it per doorbell. 
++ */ ++ if (mhu->frame == SENDER_FRAME && mhu->minor) ++ writel_relaxed(0x1, &mhu->send->ch_wn[priv->ch_wn_idx].int_en); ++ } ++ } ++ ++ /* Make sure we have initialized all channels */ ++ BUG_ON(chans - mbox->chans != mbox->num_chans); ++ ++ return 0; ++} ++ ++static int mhuv2_parse_channels(struct mhuv2 *mhu) ++{ ++ struct device *dev = mhu->mbox.dev; ++ const struct device_node *np = dev->of_node; ++ int ret, count; ++ u32 *protocols; ++ ++ count = of_property_count_u32_elems(np, MHUV2_PROTOCOL_PROP); ++ if (count <= 0 || count % 2) { ++ dev_err(dev, "Invalid %s property (%d)\n", MHUV2_PROTOCOL_PROP, ++ count); ++ return -EINVAL; ++ } ++ ++ protocols = devm_kmalloc_array(dev, count, sizeof(*protocols), GFP_KERNEL); ++ if (!protocols) ++ return -ENOMEM; ++ ++ ret = of_property_read_u32_array(np, MHUV2_PROTOCOL_PROP, protocols, count); ++ if (ret) { ++ dev_err(dev, "Failed to read %s property: %d\n", ++ MHUV2_PROTOCOL_PROP, ret); ++ return ret; ++ } ++ ++ mhu->protocols = protocols; ++ mhu->length = count; ++ ++ ret = mhuv2_verify_protocol(mhu); ++ if (ret) ++ return ret; ++ ++ return mhuv2_allocate_channels(mhu); ++} ++ ++static int mhuv2_tx_init(struct amba_device *adev, struct mhuv2 *mhu, ++ void __iomem *reg) ++{ ++ struct device *dev = mhu->mbox.dev; ++ int ret, i; ++ ++ mhu->frame = SENDER_FRAME; ++ mhu->mbox.ops = &mhuv2_sender_ops; ++ mhu->send = reg; ++ ++ mhu->windows = readl_relaxed_bitfield(&mhu->send->mhu_cfg, num_ch); ++ mhu->minor = readl_relaxed_bitfield(&mhu->send->aidr, arch_minor_rev); ++ ++ spin_lock_init(&mhu->doorbell_pending_lock); ++ ++ /* ++ * For minor version 1 and forward, tx interrupt is provided by ++ * the controller. 
++ */ ++ if (mhu->minor && adev->irq[0]) { ++ ret = devm_request_threaded_irq(dev, adev->irq[0], NULL, ++ mhuv2_sender_interrupt, ++ IRQF_ONESHOT, "mhuv2-tx", mhu); ++ if (ret) { ++ dev_err(dev, "Failed to request tx IRQ, fallback to polling mode: %d\n", ++ ret); ++ } else { ++ mhu->mbox.txdone_irq = true; ++ mhu->mbox.txdone_poll = false; ++ mhu->irq = adev->irq[0]; ++ ++ writel_relaxed_bitfield(1, &mhu->send->int_en, chcomb); ++ ++ /* Disable all channel interrupts */ ++ for (i = 0; i < mhu->windows; i++) ++ writel_relaxed(0x0, &mhu->send->ch_wn[i].int_en); ++ ++ goto out; ++ } ++ } ++ ++ mhu->mbox.txdone_irq = false; ++ mhu->mbox.txdone_poll = true; ++ mhu->mbox.txpoll_period = 1; ++ ++out: ++ /* Wait for receiver to be ready */ ++ writel_relaxed(0x1, &mhu->send->access_request); ++ while (!readl_relaxed(&mhu->send->access_ready)) ++ continue; ++ ++ return 0; ++} ++ ++static int mhuv2_rx_init(struct amba_device *adev, struct mhuv2 *mhu, ++ void __iomem *reg) ++{ ++ struct device *dev = mhu->mbox.dev; ++ int ret, i; ++ ++ mhu->frame = RECEIVER_FRAME; ++ mhu->mbox.ops = &mhuv2_receiver_ops; ++ mhu->recv = reg; ++ ++ mhu->windows = readl_relaxed_bitfield(&mhu->recv->mhu_cfg, num_ch); ++ mhu->minor = readl_relaxed_bitfield(&mhu->recv->aidr, arch_minor_rev); ++ ++ mhu->irq = adev->irq[0]; ++ if (!mhu->irq) { ++ dev_err(dev, "Missing receiver IRQ\n"); ++ return -EINVAL; ++ } ++ ++ ret = devm_request_threaded_irq(dev, mhu->irq, NULL, ++ mhuv2_receiver_interrupt, IRQF_ONESHOT, ++ "mhuv2-rx", mhu); ++ if (ret) { ++ dev_err(dev, "Failed to request rx IRQ\n"); ++ return ret; ++ } ++ ++ /* Mask all the channel windows */ ++ for (i = 0; i < mhu->windows; i++) ++ writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_set); ++ ++ if (mhu->minor) ++ writel_relaxed_bitfield(1, &mhu->recv->int_en, chcomb); ++ ++ return 0; ++} ++ ++static int mhuv2_probe(struct amba_device *adev, const struct amba_id *id) ++{ ++ struct device *dev = &adev->dev; ++ const struct device_node *np = 
dev->of_node; ++ struct mhuv2 *mhu; ++ void __iomem *reg; ++ int ret = -EINVAL; ++ ++ reg = devm_of_iomap(dev, dev->of_node, 0, NULL); ++ if (!reg) ++ return -ENOMEM; ++ ++ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL); ++ if (!mhu) ++ return -ENOMEM; ++ ++ mhu->mbox.dev = dev; ++ mhu->mbox.of_xlate = mhuv2_mbox_of_xlate; ++ ++ if (of_device_is_compatible(np, "arm,mhuv2-tx")) ++ ret = mhuv2_tx_init(adev, mhu, reg); ++ else if (of_device_is_compatible(np, "arm,mhuv2-rx")) ++ ret = mhuv2_rx_init(adev, mhu, reg); ++ else ++ dev_err(dev, "Invalid compatible property\n"); ++ ++ if (ret) ++ return ret; ++ ++ /* Channel windows can't be 0 */ ++ BUG_ON(!mhu->windows); ++ ++ ret = mhuv2_parse_channels(mhu); ++ if (ret) ++ return ret; ++ ++ amba_set_drvdata(adev, mhu); ++ ++ ret = devm_mbox_controller_register(dev, &mhu->mbox); ++ if (ret) ++ dev_err(dev, "failed to register ARM MHUv2 driver %d\n", ret); ++ ++ return ret; ++} ++ ++static int mhuv2_remove(struct amba_device *adev) ++{ ++ struct mhuv2 *mhu = amba_get_drvdata(adev); ++ ++ if (mhu->frame == SENDER_FRAME) ++ writel_relaxed(0x0, &mhu->send->access_request); ++ ++ return 0; ++} ++ ++static struct amba_id mhuv2_ids[] = { ++ { ++ /* 2.0 */ ++ .id = 0xbb0d1, ++ .mask = 0xfffff, ++ }, ++ { ++ /* 2.1 */ ++ .id = 0xbb076, ++ .mask = 0xfffff, ++ }, ++ { 0, 0 }, ++}; ++MODULE_DEVICE_TABLE(amba, mhuv2_ids); ++ ++static struct amba_driver mhuv2_driver = { ++ .drv = { ++ .name = "arm-mhuv2", ++ }, ++ .id_table = mhuv2_ids, ++ .probe = mhuv2_probe, ++ .remove = mhuv2_remove, ++}; ++module_amba_driver(mhuv2_driver); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("ARM MHUv2 Driver"); ++MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>"); ++MODULE_AUTHOR("Tushar Khandelwal <tushar.khandelwal@arm.com>"); +diff --git a/include/linux/mailbox/arm_mhuv2_message.h b/include/linux/mailbox/arm_mhuv2_message.h +new file mode 100644 +index 000000000000..821b9d96daa4 +--- /dev/null ++++ 
b/include/linux/mailbox/arm_mhuv2_message.h +@@ -0,0 +1,20 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * ARM MHUv2 Mailbox Message ++ * ++ * Copyright (C) 2020 Arm Ltd. ++ * Copyright (C) 2020 Linaro Ltd. ++ */ ++ ++#ifndef _LINUX_ARM_MHUV2_MESSAGE_H_ ++#define _LINUX_ARM_MHUV2_MESSAGE_H_ ++ ++#include <linux/types.h> ++ ++/* Data structure for data-transfer protocol */ ++struct arm_mhuv2_mbox_msg { ++ void *data; ++ size_t len; ++}; ++ ++#endif /* _LINUX_ARM_MHUV2_MESSAGE_H_ */ +-- +2.17.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0005-mailbox-arm_mhuv2-Fix-sparse-warnings.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0005-mailbox-arm_mhuv2-Fix-sparse-warnings.patch new file mode 100644 index 0000000000..8905f745a5 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0005-mailbox-arm_mhuv2-Fix-sparse-warnings.patch @@ -0,0 +1,114 @@ +From 1c75e7d566e29258e9daf7b1548f2d681efb4aea Mon Sep 17 00:00:00 2001 +From: Viresh Kumar <viresh.kumar@linaro.org> +Date: Wed, 30 Dec 2020 10:12:04 +0530 +Subject: [PATCH 05/22] mailbox: arm_mhuv2: Fix sparse warnings + +This patch fixes a bunch of sparse warnings in the newly added arm_mhuv2 +driver. 
+ +drivers/mailbox/arm_mhuv2.c:506:24: warning: incorrect type in argument 1 (different address spaces) +drivers/mailbox/arm_mhuv2.c:506:24: expected void const volatile [noderef] __iomem *addr +drivers/mailbox/arm_mhuv2.c:506:24: got unsigned int [usertype] * +drivers/mailbox/arm_mhuv2.c:547:42: warning: incorrect type in argument 2 (different address spaces) +drivers/mailbox/arm_mhuv2.c:547:42: expected unsigned int [usertype] *reg +drivers/mailbox/arm_mhuv2.c:547:42: got unsigned int [noderef] __iomem * +drivers/mailbox/arm_mhuv2.c:625:42: warning: incorrect type in argument 2 (different address spaces) +drivers/mailbox/arm_mhuv2.c:625:42: expected unsigned int [usertype] *reg +drivers/mailbox/arm_mhuv2.c:625:42: got unsigned int [noderef] __iomem * +drivers/mailbox/arm_mhuv2.c:972:24: warning: dereference of noderef expression +drivers/mailbox/arm_mhuv2.c:973:22: warning: dereference of noderef expression +drivers/mailbox/arm_mhuv2.c:993:25: warning: dereference of noderef expression +drivers/mailbox/arm_mhuv2.c:1026:24: warning: dereference of noderef expression +drivers/mailbox/arm_mhuv2.c:1027:22: warning: dereference of noderef expression +drivers/mailbox/arm_mhuv2.c:1048:17: warning: dereference of noderef expression + +Reported-by: kernel test robot <lkp@intel.com> +Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org> +Signed-off-by: Jassi Brar <jaswinder.singh@linaro.org> + +Upstream-Status: Backport [https://lkml.org/lkml/2021/2/9/428] +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +--- + drivers/mailbox/arm_mhuv2.c | 22 +++++++++++----------- + 1 file changed, 11 insertions(+), 11 deletions(-) + +diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c +index 67fb10885bb4..8223c1005254 100644 +--- a/drivers/mailbox/arm_mhuv2.c ++++ b/drivers/mailbox/arm_mhuv2.c +@@ -238,19 +238,19 @@ struct mhuv2_mbox_chan_priv { + }; + + /* Macro for reading a bitfield within a physically mapped packed struct */ +-#define 
readl_relaxed_bitfield(_regptr, _field) \ ++#define readl_relaxed_bitfield(_regptr, _type, _field) \ + ({ \ + u32 _regval; \ + _regval = readl_relaxed((_regptr)); \ +- (*(typeof((_regptr)))(&_regval))._field; \ ++ (*(_type *)(&_regval))._field; \ + }) + + /* Macro for writing a bitfield within a physically mapped packed struct */ +-#define writel_relaxed_bitfield(_value, _regptr, _field) \ ++#define writel_relaxed_bitfield(_value, _regptr, _type, _field) \ + ({ \ + u32 _regval; \ + _regval = readl_relaxed(_regptr); \ +- (*(typeof(_regptr))(&_regval))._field = _value; \ ++ (*(_type *)(&_regval))._field = _value; \ + writel_relaxed(_regval, _regptr); \ + }) + +@@ -496,7 +496,7 @@ static const struct mhuv2_protocol_ops mhuv2_data_transfer_ops = { + + /* Interrupt handlers */ + +-static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 *reg) ++static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 __iomem *reg) + { + struct mbox_chan *chans = mhu->mbox.chans; + int channel = 0, i, offset = 0, windows, protocol, ch_wn; +@@ -969,8 +969,8 @@ static int mhuv2_tx_init(struct amba_device *adev, struct mhuv2 *mhu, + mhu->mbox.ops = &mhuv2_sender_ops; + mhu->send = reg; + +- mhu->windows = readl_relaxed_bitfield(&mhu->send->mhu_cfg, num_ch); +- mhu->minor = readl_relaxed_bitfield(&mhu->send->aidr, arch_minor_rev); ++ mhu->windows = readl_relaxed_bitfield(&mhu->send->mhu_cfg, struct mhu_cfg_t, num_ch); ++ mhu->minor = readl_relaxed_bitfield(&mhu->send->aidr, struct aidr_t, arch_minor_rev); + + spin_lock_init(&mhu->doorbell_pending_lock); + +@@ -990,7 +990,7 @@ static int mhuv2_tx_init(struct amba_device *adev, struct mhuv2 *mhu, + mhu->mbox.txdone_poll = false; + mhu->irq = adev->irq[0]; + +- writel_relaxed_bitfield(1, &mhu->send->int_en, chcomb); ++ writel_relaxed_bitfield(1, &mhu->send->int_en, struct int_en_t, chcomb); + + /* Disable all channel interrupts */ + for (i = 0; i < mhu->windows; i++) +@@ -1023,8 +1023,8 @@ static int mhuv2_rx_init(struct 
amba_device *adev, struct mhuv2 *mhu, + mhu->mbox.ops = &mhuv2_receiver_ops; + mhu->recv = reg; + +- mhu->windows = readl_relaxed_bitfield(&mhu->recv->mhu_cfg, num_ch); +- mhu->minor = readl_relaxed_bitfield(&mhu->recv->aidr, arch_minor_rev); ++ mhu->windows = readl_relaxed_bitfield(&mhu->recv->mhu_cfg, struct mhu_cfg_t, num_ch); ++ mhu->minor = readl_relaxed_bitfield(&mhu->recv->aidr, struct aidr_t, arch_minor_rev); + + mhu->irq = adev->irq[0]; + if (!mhu->irq) { +@@ -1045,7 +1045,7 @@ static int mhuv2_rx_init(struct amba_device *adev, struct mhuv2 *mhu, + writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_set); + + if (mhu->minor) +- writel_relaxed_bitfield(1, &mhu->recv->int_en, chcomb); ++ writel_relaxed_bitfield(1, &mhu->recv->int_en, struct int_en_t, chcomb); + + return 0; + } +-- +2.17.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0006-mailbox-arm_mhuv2-make-remove-callback-return-void.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0006-mailbox-arm_mhuv2-make-remove-callback-return-void.patch new file mode 100644 index 0000000000..a353f31b0a --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0006-mailbox-arm_mhuv2-make-remove-callback-return-void.patch @@ -0,0 +1,47 @@ +From 107f39e7741bb77688df47ce3f56b25cceb301c3 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= <u.kleine-koenig@pengutronix.de> +Date: Tue, 2 Feb 2021 20:43:08 +0100 +Subject: [PATCH 06/22] mailbox: arm_mhuv2: make remove callback return void +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +My build tests failed to catch that amba driver that would have needed +adaption in commit 3fd269e74f2f ("amba: Make the remove callback return +void"). Change the remove function to make the driver build again. 
+ +Reported-by: kernel test robot <lkp@intel.com> +Fixes: 3fd269e74f2f ("amba: Make the remove callback return void") +Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de> +Acked-by: Viresh Kumar <viresh.kumar@linaro.org> +Signed-off-by: Jassi Brar <jaswinder.singh@linaro.org> + +Upstream-Status: Backport [https://lkml.org/lkml/2021/2/2/1525] +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +--- + drivers/mailbox/arm_mhuv2.c | 4 +--- + 1 file changed, 1 insertion(+), 3 deletions(-) + +diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c +index 8223c1005254..cdfb1939fabf 100644 +--- a/drivers/mailbox/arm_mhuv2.c ++++ b/drivers/mailbox/arm_mhuv2.c +@@ -1095,14 +1095,12 @@ static int mhuv2_probe(struct amba_device *adev, const struct amba_id *id) + return ret; + } + +-static int mhuv2_remove(struct amba_device *adev) ++static void mhuv2_remove(struct amba_device *adev) + { + struct mhuv2 *mhu = amba_get_drvdata(adev); + + if (mhu->frame == SENDER_FRAME) + writel_relaxed(0x0, &mhu->send->access_request); +- +- return 0; + } + + static struct amba_id mhuv2_ids[] = { +-- +2.17.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0007-mailbox-arm_mhuv2-Skip-calling-kfree-with-invalid-po.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0007-mailbox-arm_mhuv2-Skip-calling-kfree-with-invalid-po.patch new file mode 100644 index 0000000000..cf5b0b06df --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0007-mailbox-arm_mhuv2-Skip-calling-kfree-with-invalid-po.patch @@ -0,0 +1,41 @@ +From 81d76e92b03a6f33acefd8aef168948c5f595205 Mon Sep 17 00:00:00 2001 +From: Viresh Kumar <viresh.kumar@linaro.org> +Date: Mon, 22 Feb 2021 12:48:06 +0530 +Subject: [PATCH 07/22] mailbox: arm_mhuv2: Skip calling kfree() with invalid + pointer + +It is possible that 'data' passed to kfree() is set to a error value +instead of allocated space. 
Make sure it doesn't get called with invalid +pointer. + +Fixes: 5a6338cce9f4 ("mailbox: arm_mhuv2: Add driver") +Cc: v5.11 <stable@vger.kernel.org> # v5.11 +Reported-by: kernel test robot <lkp@intel.com> +Reported-by: Dan Carpenter <dan.carpenter@oracle.com> +Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org> +Signed-off-by: Jassi Brar <jaswinder.singh@linaro.org> + +Upstream-Status: Backport [https://lkml.org/lkml/2021/2/22/57] +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +--- + drivers/mailbox/arm_mhuv2.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c +index cdfb1939fabf..d997f8ebfa98 100644 +--- a/drivers/mailbox/arm_mhuv2.c ++++ b/drivers/mailbox/arm_mhuv2.c +@@ -699,7 +699,9 @@ static irqreturn_t mhuv2_receiver_interrupt(int irq, void *arg) + ret = IRQ_HANDLED; + } + +- kfree(data); ++ if (!IS_ERR(data)) ++ kfree(data); ++ + return ret; + } + +-- +2.17.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0008-firmware-arm_ffa-Backport-of-arm_ffa-driver.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0008-firmware-arm_ffa-Backport-of-arm_ffa-driver.patch new file mode 100644 index 0000000000..d91dbc57da --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0008-firmware-arm_ffa-Backport-of-arm_ffa-driver.patch @@ -0,0 +1,1684 @@ +From 85df9333f0adf60fd76eb5ebb21b89c5b0a86c10 Mon Sep 17 00:00:00 2001 +From: Sudeep Holla <sudeep.holla@arm.com> +Date: Tue, 18 May 2021 17:36:18 +0100 +Subject: [PATCH 01/32] firmware: arm_ffa: Backport of arm_ffa driver + +This is a backport of upstream ARM FFA driver from: +https://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux.git/commit/?h=v5.10/ffa&id=c0aff30cb9ad6a00c82acef0f2a48f99adf997c0 + +to branch=android12-5.10-lts + + arm64: smccc: Add support for SMCCCv1.2 extended input/output registers + commit 
3fdc0cb59d97f87e2cc708d424f1538e31744286 upstream. + + firmware: arm_ffa: Add initial FFA bus support for device enumeration + commit e781858488b918e30a6ff28e9eab6058b787e3b3 upstream. + + firmware: arm_ffa: Add initial Arm FFA driver support + commit 3bbfe9871005f38df2955b2e125933edf1d2feef upstream. + + firmware: arm_ffa: Add support for SMCCC as transport to FFA driver + commit 714be77e976a4b013b935b3223b2ef68856084d0 upstream. + + firmware: arm_ffa: Setup in-kernel users of FFA partitions + commit d0c0bce831223b08e5bade2cefc93c3ddb790796 upstream. + + firmware: arm_ffa: Add support for MEM_* interfaces + commit cc2195fe536c28e192df5d07e6dd277af36814b4 upstream. + + firmware: arm_ffa: Ensure drivers provide a probe function + commit 92743071464fca5acbbe812d9a0d88de3eaaad36 upstream. + + firmware: arm_ffa: Simplify probe function + commit e362547addc39e4bb18ad5bdfd59ce4d512d0c08 upstream. + + firmware: arm_ffa: Fix the comment style + commit ba684a31d3626c86cd9097e12d6ed57d224d077d upstream. + + firmware: arm_ffa: Fix a possible ffa_linux_errmap buffer overflow + commit dd925db6f07556061c11ab1fbfa4a0145ae6b438 upstream. + + firmware: arm_ffa: Add missing remove callback to ffa_bus_type + commit 244f5d597e1ea519c2085fbd9819458688775e42 upstream. + + firmware: arm_ffa: Fix __ffa_devices_unregister + commit eb7b52e6db7c21400b9b2d539f9343fb6e94bd94 upstream. + + firmware: arm_ffa: Handle compatibility with different firmware versions + commit 8e3f9da608f14cfebac2659d8dd8737b79d01308 upstream. + + firmware: arm_ffa: Add support for MEM_LEND + commit 82a8daaecfd9382e9450a05f86be8a274cf69a27 upstream. + + firmware: arm_ffa: Remove unused 'compat_version' variable + commit 01537a078b86917c7bb69aa4b756b42b980c158b upstream. 
+ +Signed-off-by: Sudeep Holla <sudeep.holla@arm.com> +Change-Id: If9df40d2d10be9e3c95298820bc20c201ea1774c +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> + +Upstream-Status: Backport +Change-Id: I8e6197d8b7ef6654dacd21450069b8e284a3cec5 +--- + MAINTAINERS | 7 + + arch/arm64/kernel/asm-offsets.c | 9 + + arch/arm64/kernel/smccc-call.S | 57 +++ + drivers/firmware/Kconfig | 1 + + drivers/firmware/Makefile | 1 + + drivers/firmware/arm_ffa/Kconfig | 21 + + drivers/firmware/arm_ffa/Makefile | 6 + + drivers/firmware/arm_ffa/bus.c | 220 +++++++++ + drivers/firmware/arm_ffa/common.h | 31 ++ + drivers/firmware/arm_ffa/driver.c | 776 ++++++++++++++++++++++++++++++ + drivers/firmware/arm_ffa/smccc.c | 39 ++ + include/linux/arm-smccc.h | 55 +++ + include/linux/arm_ffa.h | 269 +++++++++++ + 13 files changed, 1492 insertions(+) + create mode 100644 drivers/firmware/arm_ffa/Kconfig + create mode 100644 drivers/firmware/arm_ffa/Makefile + create mode 100644 drivers/firmware/arm_ffa/bus.c + create mode 100644 drivers/firmware/arm_ffa/common.h + create mode 100644 drivers/firmware/arm_ffa/driver.c + create mode 100644 drivers/firmware/arm_ffa/smccc.c + create mode 100644 include/linux/arm_ffa.h + +diff --git a/MAINTAINERS b/MAINTAINERS +index 5234423c477a..d5fdc9e68c89 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -6847,6 +6847,13 @@ F: include/linux/firewire.h + F: include/uapi/linux/firewire*.h + F: tools/firewire/ + ++FIRMWARE FRAMEWORK FOR ARMV8-A ++M: Sudeep Holla <sudeep.holla@arm.com> ++L: linux-arm-kernel@lists.infradead.org ++S: Maintained ++F: drivers/firmware/arm_ffa/ ++F: include/linux/arm_ffa.h ++ + FIRMWARE LOADER (request_firmware) + M: Luis Chamberlain <mcgrof@kernel.org> + L: linux-kernel@vger.kernel.org +diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c +index 93da876a58e6..bad4a367da28 100644 +--- a/arch/arm64/kernel/asm-offsets.c ++++ b/arch/arm64/kernel/asm-offsets.c +@@ -139,6 +139,15 @@ int main(void) + 
DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2)); + DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id)); + DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state)); ++ DEFINE(ARM_SMCCC_1_2_REGS_X0_OFFS, offsetof(struct arm_smccc_1_2_regs, a0)); ++ DEFINE(ARM_SMCCC_1_2_REGS_X2_OFFS, offsetof(struct arm_smccc_1_2_regs, a2)); ++ DEFINE(ARM_SMCCC_1_2_REGS_X4_OFFS, offsetof(struct arm_smccc_1_2_regs, a4)); ++ DEFINE(ARM_SMCCC_1_2_REGS_X6_OFFS, offsetof(struct arm_smccc_1_2_regs, a6)); ++ DEFINE(ARM_SMCCC_1_2_REGS_X8_OFFS, offsetof(struct arm_smccc_1_2_regs, a8)); ++ DEFINE(ARM_SMCCC_1_2_REGS_X10_OFFS, offsetof(struct arm_smccc_1_2_regs, a10)); ++ DEFINE(ARM_SMCCC_1_2_REGS_X12_OFFS, offsetof(struct arm_smccc_1_2_regs, a12)); ++ DEFINE(ARM_SMCCC_1_2_REGS_X14_OFFS, offsetof(struct arm_smccc_1_2_regs, a14)); ++ DEFINE(ARM_SMCCC_1_2_REGS_X16_OFFS, offsetof(struct arm_smccc_1_2_regs, a16)); + BLANK(); + DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address)); + DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address)); +diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S +index d62447964ed9..2def9d0dd3dd 100644 +--- a/arch/arm64/kernel/smccc-call.S ++++ b/arch/arm64/kernel/smccc-call.S +@@ -43,3 +43,60 @@ SYM_FUNC_START(__arm_smccc_hvc) + SMCCC hvc + SYM_FUNC_END(__arm_smccc_hvc) + EXPORT_SYMBOL(__arm_smccc_hvc) ++ ++ .macro SMCCC_1_2 instr ++ /* Save `res` and free a GPR that won't be clobbered */ ++ stp x1, x19, [sp, #-16]! 
++ ++ /* Ensure `args` won't be clobbered while loading regs in next step */ ++ mov x19, x0 ++ ++ /* Load the registers x0 - x17 from the struct arm_smccc_1_2_regs */ ++ ldp x0, x1, [x19, #ARM_SMCCC_1_2_REGS_X0_OFFS] ++ ldp x2, x3, [x19, #ARM_SMCCC_1_2_REGS_X2_OFFS] ++ ldp x4, x5, [x19, #ARM_SMCCC_1_2_REGS_X4_OFFS] ++ ldp x6, x7, [x19, #ARM_SMCCC_1_2_REGS_X6_OFFS] ++ ldp x8, x9, [x19, #ARM_SMCCC_1_2_REGS_X8_OFFS] ++ ldp x10, x11, [x19, #ARM_SMCCC_1_2_REGS_X10_OFFS] ++ ldp x12, x13, [x19, #ARM_SMCCC_1_2_REGS_X12_OFFS] ++ ldp x14, x15, [x19, #ARM_SMCCC_1_2_REGS_X14_OFFS] ++ ldp x16, x17, [x19, #ARM_SMCCC_1_2_REGS_X16_OFFS] ++ ++ \instr #0 ++ ++ /* Load the `res` from the stack */ ++ ldr x19, [sp] ++ ++ /* Store the registers x0 - x17 into the result structure */ ++ stp x0, x1, [x19, #ARM_SMCCC_1_2_REGS_X0_OFFS] ++ stp x2, x3, [x19, #ARM_SMCCC_1_2_REGS_X2_OFFS] ++ stp x4, x5, [x19, #ARM_SMCCC_1_2_REGS_X4_OFFS] ++ stp x6, x7, [x19, #ARM_SMCCC_1_2_REGS_X6_OFFS] ++ stp x8, x9, [x19, #ARM_SMCCC_1_2_REGS_X8_OFFS] ++ stp x10, x11, [x19, #ARM_SMCCC_1_2_REGS_X10_OFFS] ++ stp x12, x13, [x19, #ARM_SMCCC_1_2_REGS_X12_OFFS] ++ stp x14, x15, [x19, #ARM_SMCCC_1_2_REGS_X14_OFFS] ++ stp x16, x17, [x19, #ARM_SMCCC_1_2_REGS_X16_OFFS] ++ ++ /* Restore original x19 */ ++ ldp xzr, x19, [sp], #16 ++ ret ++.endm ++ ++/* ++ * void arm_smccc_1_2_hvc(const struct arm_smccc_1_2_regs *args, ++ * struct arm_smccc_1_2_regs *res); ++ */ ++SYM_FUNC_START(arm_smccc_1_2_hvc) ++ SMCCC_1_2 hvc ++SYM_FUNC_END(arm_smccc_1_2_hvc) ++EXPORT_SYMBOL(arm_smccc_1_2_hvc) ++ ++/* ++ * void arm_smccc_1_2_smc(const struct arm_smccc_1_2_regs *args, ++ * struct arm_smccc_1_2_regs *res); ++ */ ++SYM_FUNC_START(arm_smccc_1_2_smc) ++ SMCCC_1_2 smc ++SYM_FUNC_END(arm_smccc_1_2_smc) ++EXPORT_SYMBOL(arm_smccc_1_2_smc) +diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig +index bfef3d8d14e7..90e6dd32f2cd 100644 +--- a/drivers/firmware/Kconfig ++++ b/drivers/firmware/Kconfig +@@ -296,6 +296,7 @@ config 
TURRIS_MOX_RWTM + other manufacturing data and also utilize the Entropy Bit Generator + for hardware random number generation. + ++source "drivers/firmware/arm_ffa/Kconfig" + source "drivers/firmware/broadcom/Kconfig" + source "drivers/firmware/google/Kconfig" + source "drivers/firmware/efi/Kconfig" +diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile +index 523173cbff33..3c2af2e98def 100644 +--- a/drivers/firmware/Makefile ++++ b/drivers/firmware/Makefile +@@ -23,6 +23,7 @@ obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o + obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o + obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o + ++obj-y += arm_ffa/ + obj-y += arm_scmi/ + obj-y += broadcom/ + obj-y += meson/ +diff --git a/drivers/firmware/arm_ffa/Kconfig b/drivers/firmware/arm_ffa/Kconfig +new file mode 100644 +index 000000000000..5e3ae5cf82e8 +--- /dev/null ++++ b/drivers/firmware/arm_ffa/Kconfig +@@ -0,0 +1,21 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++config ARM_FFA_TRANSPORT ++ tristate "Arm Firmware Framework for Armv8-A" ++ depends on OF ++ depends on ARM64 ++ default n ++ help ++ This Firmware Framework(FF) for Arm A-profile processors describes ++ interfaces that standardize communication between the various ++ software images which includes communication between images in ++ the Secure world and Normal world. It also leverages the ++ virtualization extension to isolate software images provided ++ by an ecosystem of vendors from each other. ++ ++ This driver provides interface for all the client drivers making ++ use of the features offered by ARM FF-A. 
++ ++config ARM_FFA_SMCCC ++ bool ++ default ARM_FFA_TRANSPORT ++ depends on ARM64 && HAVE_ARM_SMCCC_DISCOVERY +diff --git a/drivers/firmware/arm_ffa/Makefile b/drivers/firmware/arm_ffa/Makefile +new file mode 100644 +index 000000000000..9d9f37523200 +--- /dev/null ++++ b/drivers/firmware/arm_ffa/Makefile +@@ -0,0 +1,6 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++ffa-bus-y = bus.o ++ffa-driver-y = driver.o ++ffa-transport-$(CONFIG_ARM_FFA_SMCCC) += smccc.o ++ffa-module-objs := $(ffa-bus-y) $(ffa-driver-y) $(ffa-transport-y) ++obj-$(CONFIG_ARM_FFA_TRANSPORT) = ffa-module.o +diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c +new file mode 100644 +index 000000000000..fca1e311ea6c +--- /dev/null ++++ b/drivers/firmware/arm_ffa/bus.c +@@ -0,0 +1,220 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2021 ARM Ltd. ++ */ ++ ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ ++#include <linux/arm_ffa.h> ++#include <linux/device.h> ++#include <linux/fs.h> ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/slab.h> ++#include <linux/types.h> ++ ++#include "common.h" ++ ++static int ffa_device_match(struct device *dev, struct device_driver *drv) ++{ ++ const struct ffa_device_id *id_table; ++ struct ffa_device *ffa_dev; ++ ++ id_table = to_ffa_driver(drv)->id_table; ++ ffa_dev = to_ffa_dev(dev); ++ ++ while (!uuid_is_null(&id_table->uuid)) { ++ /* ++ * FF-A v1.0 doesn't provide discovery of UUIDs, just the ++ * partition IDs, so fetch the partitions IDs for this ++ * id_table UUID and assign the UUID to the device if the ++ * partition ID matches ++ */ ++ if (uuid_is_null(&ffa_dev->uuid)) ++ ffa_device_match_uuid(ffa_dev, &id_table->uuid); ++ ++ if (uuid_equal(&ffa_dev->uuid, &id_table->uuid)) ++ return 1; ++ id_table++; ++ } ++ ++ return 0; ++} ++ ++static int ffa_device_probe(struct device *dev) ++{ ++ struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver); ++ struct ffa_device *ffa_dev = to_ffa_dev(dev); ++ ++ 
return ffa_drv->probe(ffa_dev); ++} ++ ++static int ffa_device_remove(struct device *dev) ++{ ++ struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver); ++ ++ ffa_drv->remove(to_ffa_dev(dev)); ++ ++ return 0; ++} ++ ++static int ffa_device_uevent(struct device *dev, struct kobj_uevent_env *env) ++{ ++ struct ffa_device *ffa_dev = to_ffa_dev(dev); ++ ++ return add_uevent_var(env, "MODALIAS=arm_ffa:%04x:%pUb", ++ ffa_dev->vm_id, &ffa_dev->uuid); ++} ++ ++static ssize_t partition_id_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct ffa_device *ffa_dev = to_ffa_dev(dev); ++ ++ return sprintf(buf, "0x%04x\n", ffa_dev->vm_id); ++} ++static DEVICE_ATTR_RO(partition_id); ++ ++static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, ++ char *buf) ++{ ++ struct ffa_device *ffa_dev = to_ffa_dev(dev); ++ ++ return sprintf(buf, "%pUb\n", &ffa_dev->uuid); ++} ++static DEVICE_ATTR_RO(uuid); ++ ++static struct attribute *ffa_device_attributes_attrs[] = { ++ &dev_attr_partition_id.attr, ++ &dev_attr_uuid.attr, ++ NULL, ++}; ++ATTRIBUTE_GROUPS(ffa_device_attributes); ++ ++struct bus_type ffa_bus_type = { ++ .name = "arm_ffa", ++ .match = ffa_device_match, ++ .probe = ffa_device_probe, ++ .remove = ffa_device_remove, ++ .uevent = ffa_device_uevent, ++ .dev_groups = ffa_device_attributes_groups, ++}; ++EXPORT_SYMBOL_GPL(ffa_bus_type); ++ ++int ffa_driver_register(struct ffa_driver *driver, struct module *owner, ++ const char *mod_name) ++{ ++ int ret; ++ ++ if (!driver->probe) ++ return -EINVAL; ++ ++ driver->driver.bus = &ffa_bus_type; ++ driver->driver.name = driver->name; ++ driver->driver.owner = owner; ++ driver->driver.mod_name = mod_name; ++ ++ ret = driver_register(&driver->driver); ++ if (!ret) ++ pr_debug("registered new ffa driver %s\n", driver->name); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(ffa_driver_register); ++ ++void ffa_driver_unregister(struct ffa_driver *driver) ++{ ++ driver_unregister(&driver->driver); ++} 
++EXPORT_SYMBOL_GPL(ffa_driver_unregister); ++ ++static void ffa_release_device(struct device *dev) ++{ ++ struct ffa_device *ffa_dev = to_ffa_dev(dev); ++ ++ kfree(ffa_dev); ++} ++ ++static int __ffa_devices_unregister(struct device *dev, void *data) ++{ ++ device_unregister(dev); ++ ++ return 0; ++} ++ ++static void ffa_devices_unregister(void) ++{ ++ bus_for_each_dev(&ffa_bus_type, NULL, NULL, ++ __ffa_devices_unregister); ++} ++ ++bool ffa_device_is_valid(struct ffa_device *ffa_dev) ++{ ++ bool valid = false; ++ struct device *dev = NULL; ++ struct ffa_device *tmp_dev; ++ ++ do { ++ dev = bus_find_next_device(&ffa_bus_type, dev); ++ tmp_dev = to_ffa_dev(dev); ++ if (tmp_dev == ffa_dev) { ++ valid = true; ++ break; ++ } ++ put_device(dev); ++ } while (dev); ++ ++ put_device(dev); ++ ++ return valid; ++} ++ ++struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id) ++{ ++ int ret; ++ struct device *dev; ++ struct ffa_device *ffa_dev; ++ ++ ffa_dev = kzalloc(sizeof(*ffa_dev), GFP_KERNEL); ++ if (!ffa_dev) ++ return NULL; ++ ++ dev = &ffa_dev->dev; ++ dev->bus = &ffa_bus_type; ++ dev->release = ffa_release_device; ++ dev_set_name(&ffa_dev->dev, "arm-ffa-%04x", vm_id); ++ ++ ffa_dev->vm_id = vm_id; ++ uuid_copy(&ffa_dev->uuid, uuid); ++ ++ ret = device_register(&ffa_dev->dev); ++ if (ret) { ++ dev_err(dev, "unable to register device %s err=%d\n", ++ dev_name(dev), ret); ++ put_device(dev); ++ return NULL; ++ } ++ ++ return ffa_dev; ++} ++EXPORT_SYMBOL_GPL(ffa_device_register); ++ ++void ffa_device_unregister(struct ffa_device *ffa_dev) ++{ ++ if (!ffa_dev) ++ return; ++ ++ device_unregister(&ffa_dev->dev); ++} ++EXPORT_SYMBOL_GPL(ffa_device_unregister); ++ ++int arm_ffa_bus_init(void) ++{ ++ return bus_register(&ffa_bus_type); ++} ++ ++void arm_ffa_bus_exit(void) ++{ ++ ffa_devices_unregister(); ++ bus_unregister(&ffa_bus_type); ++} +diff --git a/drivers/firmware/arm_ffa/common.h b/drivers/firmware/arm_ffa/common.h +new file mode 100644 +index 
000000000000..d6eccf1fd3f6 +--- /dev/null ++++ b/drivers/firmware/arm_ffa/common.h +@@ -0,0 +1,31 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2021 ARM Ltd. ++ */ ++ ++#ifndef _FFA_COMMON_H ++#define _FFA_COMMON_H ++ ++#include <linux/arm_ffa.h> ++#include <linux/arm-smccc.h> ++#include <linux/err.h> ++ ++typedef struct arm_smccc_1_2_regs ffa_value_t; ++ ++typedef void (ffa_fn)(ffa_value_t, ffa_value_t *); ++ ++int arm_ffa_bus_init(void); ++void arm_ffa_bus_exit(void); ++bool ffa_device_is_valid(struct ffa_device *ffa_dev); ++void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid); ++ ++#ifdef CONFIG_ARM_FFA_SMCCC ++int __init ffa_transport_init(ffa_fn **invoke_ffa_fn); ++#else ++static inline int __init ffa_transport_init(ffa_fn **invoke_ffa_fn) ++{ ++ return -EOPNOTSUPP; ++} ++#endif ++ ++#endif /* _FFA_COMMON_H */ +diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c +new file mode 100644 +index 000000000000..14f900047ac0 +--- /dev/null ++++ b/drivers/firmware/arm_ffa/driver.c +@@ -0,0 +1,776 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Arm Firmware Framework for ARMv8-A(FFA) interface driver ++ * ++ * The Arm FFA specification[1] describes a software architecture to ++ * leverages the virtualization extension to isolate software images ++ * provided by an ecosystem of vendors from each other and describes ++ * interfaces that standardize communication between the various software ++ * images including communication between images in the Secure world and ++ * Normal world. Any Hypervisor could use the FFA interfaces to enable ++ * communication between VMs it manages. ++ * ++ * The Hypervisor a.k.a Partition managers in FFA terminology can assign ++ * system resources(Memory regions, Devices, CPU cycles) to the partitions ++ * and manage isolation amongst them. ++ * ++ * [1] https://developer.arm.com/docs/den0077/latest ++ * ++ * Copyright (C) 2021 ARM Ltd. 
++ */ ++ ++#define DRIVER_NAME "ARM FF-A" ++#define pr_fmt(fmt) DRIVER_NAME ": " fmt ++ ++#include <linux/arm_ffa.h> ++#include <linux/bitfield.h> ++#include <linux/device.h> ++#include <linux/io.h> ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/mm.h> ++#include <linux/scatterlist.h> ++#include <linux/slab.h> ++#include <linux/uuid.h> ++ ++#include "common.h" ++ ++#define FFA_DRIVER_VERSION FFA_VERSION_1_0 ++ ++#define FFA_SMC(calling_convention, func_num) \ ++ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, (calling_convention), \ ++ ARM_SMCCC_OWNER_STANDARD, (func_num)) ++ ++#define FFA_SMC_32(func_num) FFA_SMC(ARM_SMCCC_SMC_32, (func_num)) ++#define FFA_SMC_64(func_num) FFA_SMC(ARM_SMCCC_SMC_64, (func_num)) ++ ++#define FFA_ERROR FFA_SMC_32(0x60) ++#define FFA_SUCCESS FFA_SMC_32(0x61) ++#define FFA_INTERRUPT FFA_SMC_32(0x62) ++#define FFA_VERSION FFA_SMC_32(0x63) ++#define FFA_FEATURES FFA_SMC_32(0x64) ++#define FFA_RX_RELEASE FFA_SMC_32(0x65) ++#define FFA_RXTX_MAP FFA_SMC_32(0x66) ++#define FFA_FN64_RXTX_MAP FFA_SMC_64(0x66) ++#define FFA_RXTX_UNMAP FFA_SMC_32(0x67) ++#define FFA_PARTITION_INFO_GET FFA_SMC_32(0x68) ++#define FFA_ID_GET FFA_SMC_32(0x69) ++#define FFA_MSG_POLL FFA_SMC_32(0x6A) ++#define FFA_MSG_WAIT FFA_SMC_32(0x6B) ++#define FFA_YIELD FFA_SMC_32(0x6C) ++#define FFA_RUN FFA_SMC_32(0x6D) ++#define FFA_MSG_SEND FFA_SMC_32(0x6E) ++#define FFA_MSG_SEND_DIRECT_REQ FFA_SMC_32(0x6F) ++#define FFA_FN64_MSG_SEND_DIRECT_REQ FFA_SMC_64(0x6F) ++#define FFA_MSG_SEND_DIRECT_RESP FFA_SMC_32(0x70) ++#define FFA_FN64_MSG_SEND_DIRECT_RESP FFA_SMC_64(0x70) ++#define FFA_MEM_DONATE FFA_SMC_32(0x71) ++#define FFA_FN64_MEM_DONATE FFA_SMC_64(0x71) ++#define FFA_MEM_LEND FFA_SMC_32(0x72) ++#define FFA_FN64_MEM_LEND FFA_SMC_64(0x72) ++#define FFA_MEM_SHARE FFA_SMC_32(0x73) ++#define FFA_FN64_MEM_SHARE FFA_SMC_64(0x73) ++#define FFA_MEM_RETRIEVE_REQ FFA_SMC_32(0x74) ++#define FFA_FN64_MEM_RETRIEVE_REQ FFA_SMC_64(0x74) ++#define FFA_MEM_RETRIEVE_RESP 
FFA_SMC_32(0x75) ++#define FFA_MEM_RELINQUISH FFA_SMC_32(0x76) ++#define FFA_MEM_RECLAIM FFA_SMC_32(0x77) ++#define FFA_MEM_OP_PAUSE FFA_SMC_32(0x78) ++#define FFA_MEM_OP_RESUME FFA_SMC_32(0x79) ++#define FFA_MEM_FRAG_RX FFA_SMC_32(0x7A) ++#define FFA_MEM_FRAG_TX FFA_SMC_32(0x7B) ++#define FFA_NORMAL_WORLD_RESUME FFA_SMC_32(0x7C) ++ ++/* ++ * For some calls it is necessary to use SMC64 to pass or return 64-bit values. ++ * For such calls FFA_FN_NATIVE(name) will choose the appropriate ++ * (native-width) function ID. ++ */ ++#ifdef CONFIG_64BIT ++#define FFA_FN_NATIVE(name) FFA_FN64_##name ++#else ++#define FFA_FN_NATIVE(name) FFA_##name ++#endif ++ ++/* FFA error codes. */ ++#define FFA_RET_SUCCESS (0) ++#define FFA_RET_NOT_SUPPORTED (-1) ++#define FFA_RET_INVALID_PARAMETERS (-2) ++#define FFA_RET_NO_MEMORY (-3) ++#define FFA_RET_BUSY (-4) ++#define FFA_RET_INTERRUPTED (-5) ++#define FFA_RET_DENIED (-6) ++#define FFA_RET_RETRY (-7) ++#define FFA_RET_ABORTED (-8) ++ ++#define MAJOR_VERSION_MASK GENMASK(30, 16) ++#define MINOR_VERSION_MASK GENMASK(15, 0) ++#define MAJOR_VERSION(x) ((u16)(FIELD_GET(MAJOR_VERSION_MASK, (x)))) ++#define MINOR_VERSION(x) ((u16)(FIELD_GET(MINOR_VERSION_MASK, (x)))) ++#define PACK_VERSION_INFO(major, minor) \ ++ (FIELD_PREP(MAJOR_VERSION_MASK, (major)) | \ ++ FIELD_PREP(MINOR_VERSION_MASK, (minor))) ++#define FFA_VERSION_1_0 PACK_VERSION_INFO(1, 0) ++#define FFA_MIN_VERSION FFA_VERSION_1_0 ++ ++#define SENDER_ID_MASK GENMASK(31, 16) ++#define RECEIVER_ID_MASK GENMASK(15, 0) ++#define SENDER_ID(x) ((u16)(FIELD_GET(SENDER_ID_MASK, (x)))) ++#define RECEIVER_ID(x) ((u16)(FIELD_GET(RECEIVER_ID_MASK, (x)))) ++#define PACK_TARGET_INFO(s, r) \ ++ (FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r))) ++ ++/* ++ * FF-A specification mentions explicitly about '4K pages'. This should ++ * not be confused with the kernel PAGE_SIZE, which is the translation ++ * granule kernel is configured and may be one among 4K, 16K and 64K. 
++ */ ++#define FFA_PAGE_SIZE SZ_4K ++/* ++ * Keeping RX TX buffer size as 4K for now ++ * 64K may be preferred to keep it min a page in 64K PAGE_SIZE config ++ */ ++#define RXTX_BUFFER_SIZE SZ_4K ++ ++static ffa_fn *invoke_ffa_fn; ++ ++static const int ffa_linux_errmap[] = { ++ /* better than switch case as long as return value is continuous */ ++ 0, /* FFA_RET_SUCCESS */ ++ -EOPNOTSUPP, /* FFA_RET_NOT_SUPPORTED */ ++ -EINVAL, /* FFA_RET_INVALID_PARAMETERS */ ++ -ENOMEM, /* FFA_RET_NO_MEMORY */ ++ -EBUSY, /* FFA_RET_BUSY */ ++ -EINTR, /* FFA_RET_INTERRUPTED */ ++ -EACCES, /* FFA_RET_DENIED */ ++ -EAGAIN, /* FFA_RET_RETRY */ ++ -ECANCELED, /* FFA_RET_ABORTED */ ++}; ++ ++static inline int ffa_to_linux_errno(int errno) ++{ ++ int err_idx = -errno; ++ ++ if (err_idx >= 0 && err_idx < ARRAY_SIZE(ffa_linux_errmap)) ++ return ffa_linux_errmap[err_idx]; ++ return -EINVAL; ++} ++ ++struct ffa_drv_info { ++ u32 version; ++ u16 vm_id; ++ struct mutex rx_lock; /* lock to protect Rx buffer */ ++ struct mutex tx_lock; /* lock to protect Tx buffer */ ++ void *rx_buffer; ++ void *tx_buffer; ++}; ++ ++static struct ffa_drv_info *drv_info; ++ ++/* ++ * The driver must be able to support all the versions from the earliest ++ * supported FFA_MIN_VERSION to the latest supported FFA_DRIVER_VERSION. ++ * The specification states that if firmware supports a FFA implementation ++ * that is incompatible with and at a greater version number than specified ++ * by the caller(FFA_DRIVER_VERSION passed as parameter to FFA_VERSION), ++ * it must return the NOT_SUPPORTED error code. 
++ */ ++static u32 ffa_compatible_version_find(u32 version) ++{ ++ u16 major = MAJOR_VERSION(version), minor = MINOR_VERSION(version); ++ u16 drv_major = MAJOR_VERSION(FFA_DRIVER_VERSION); ++ u16 drv_minor = MINOR_VERSION(FFA_DRIVER_VERSION); ++ ++ if ((major < drv_major) || (major == drv_major && minor <= drv_minor)) ++ return version; ++ ++ pr_info("Firmware version higher than driver version, downgrading\n"); ++ return FFA_DRIVER_VERSION; ++} ++ ++static int ffa_version_check(u32 *version) ++{ ++ ffa_value_t ver; ++ ++ invoke_ffa_fn((ffa_value_t){ ++ .a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION, ++ }, &ver); ++ ++ if (ver.a0 == FFA_RET_NOT_SUPPORTED) { ++ pr_info("FFA_VERSION returned not supported\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ if (ver.a0 < FFA_MIN_VERSION) { ++ pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n", ++ MAJOR_VERSION(ver.a0), MINOR_VERSION(ver.a0), ++ MAJOR_VERSION(FFA_MIN_VERSION), ++ MINOR_VERSION(FFA_MIN_VERSION)); ++ return -EINVAL; ++ } ++ ++ pr_info("Driver version %d.%d\n", MAJOR_VERSION(FFA_DRIVER_VERSION), ++ MINOR_VERSION(FFA_DRIVER_VERSION)); ++ pr_info("Firmware version %d.%d found\n", MAJOR_VERSION(ver.a0), ++ MINOR_VERSION(ver.a0)); ++ *version = ffa_compatible_version_find(ver.a0); ++ ++ return 0; ++} ++ ++static int ffa_rx_release(void) ++{ ++ ffa_value_t ret; ++ ++ invoke_ffa_fn((ffa_value_t){ ++ .a0 = FFA_RX_RELEASE, ++ }, &ret); ++ ++ if (ret.a0 == FFA_ERROR) ++ return ffa_to_linux_errno((int)ret.a2); ++ ++ /* check for ret.a0 == FFA_RX_RELEASE ? 
*/ ++ ++ return 0; ++} ++ ++static int ffa_rxtx_map(phys_addr_t tx_buf, phys_addr_t rx_buf, u32 pg_cnt) ++{ ++ ffa_value_t ret; ++ ++ invoke_ffa_fn((ffa_value_t){ ++ .a0 = FFA_FN_NATIVE(RXTX_MAP), ++ .a1 = tx_buf, .a2 = rx_buf, .a3 = pg_cnt, ++ }, &ret); ++ ++ if (ret.a0 == FFA_ERROR) ++ return ffa_to_linux_errno((int)ret.a2); ++ ++ return 0; ++} ++ ++static int ffa_rxtx_unmap(u16 vm_id) ++{ ++ ffa_value_t ret; ++ ++ invoke_ffa_fn((ffa_value_t){ ++ .a0 = FFA_RXTX_UNMAP, .a1 = PACK_TARGET_INFO(vm_id, 0), ++ }, &ret); ++ ++ if (ret.a0 == FFA_ERROR) ++ return ffa_to_linux_errno((int)ret.a2); ++ ++ return 0; ++} ++ ++/* buffer must be sizeof(struct ffa_partition_info) * num_partitions */ ++static int ++__ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3, ++ struct ffa_partition_info *buffer, int num_partitions) ++{ ++ int count; ++ ffa_value_t partition_info; ++ ++ mutex_lock(&drv_info->rx_lock); ++ invoke_ffa_fn((ffa_value_t){ ++ .a0 = FFA_PARTITION_INFO_GET, ++ .a1 = uuid0, .a2 = uuid1, .a3 = uuid2, .a4 = uuid3, ++ }, &partition_info); ++ ++ if (partition_info.a0 == FFA_ERROR) { ++ mutex_unlock(&drv_info->rx_lock); ++ return ffa_to_linux_errno((int)partition_info.a2); ++ } ++ ++ count = partition_info.a2; ++ ++ if (buffer && count <= num_partitions) ++ memcpy(buffer, drv_info->rx_buffer, sizeof(*buffer) * count); ++ ++ ffa_rx_release(); ++ ++ mutex_unlock(&drv_info->rx_lock); ++ ++ return count; ++} ++ ++/* buffer is allocated and caller must free the same if returned count > 0 */ ++static int ++ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer) ++{ ++ int count; ++ u32 uuid0_4[4]; ++ struct ffa_partition_info *pbuf; ++ ++ export_uuid((u8 *)uuid0_4, uuid); ++ count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2], ++ uuid0_4[3], NULL, 0); ++ if (count <= 0) ++ return count; ++ ++ pbuf = kcalloc(count, sizeof(*pbuf), GFP_KERNEL); ++ if (!pbuf) ++ return -ENOMEM; ++ ++ count = __ffa_partition_info_get(uuid0_4[0], 
uuid0_4[1], uuid0_4[2], ++ uuid0_4[3], pbuf, count); ++ if (count <= 0) ++ kfree(pbuf); ++ else ++ *buffer = pbuf; ++ ++ return count; ++} ++ ++#define VM_ID_MASK GENMASK(15, 0) ++static int ffa_id_get(u16 *vm_id) ++{ ++ ffa_value_t id; ++ ++ invoke_ffa_fn((ffa_value_t){ ++ .a0 = FFA_ID_GET, ++ }, &id); ++ ++ if (id.a0 == FFA_ERROR) ++ return ffa_to_linux_errno((int)id.a2); ++ ++ *vm_id = FIELD_GET(VM_ID_MASK, (id.a2)); ++ ++ return 0; ++} ++ ++static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit, ++ struct ffa_send_direct_data *data) ++{ ++ u32 req_id, resp_id, src_dst_ids = PACK_TARGET_INFO(src_id, dst_id); ++ ffa_value_t ret; ++ ++ if (mode_32bit) { ++ req_id = FFA_MSG_SEND_DIRECT_REQ; ++ resp_id = FFA_MSG_SEND_DIRECT_RESP; ++ } else { ++ req_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_REQ); ++ resp_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_RESP); ++ } ++ ++ invoke_ffa_fn((ffa_value_t){ ++ .a0 = req_id, .a1 = src_dst_ids, .a2 = 0, ++ .a3 = data->data0, .a4 = data->data1, .a5 = data->data2, ++ .a6 = data->data3, .a7 = data->data4, ++ }, &ret); ++ ++ while (ret.a0 == FFA_INTERRUPT) ++ invoke_ffa_fn((ffa_value_t){ ++ .a0 = FFA_RUN, .a1 = ret.a1, ++ }, &ret); ++ ++ if (ret.a0 == FFA_ERROR) ++ return ffa_to_linux_errno((int)ret.a2); ++ ++ if (ret.a0 == resp_id) { ++ data->data0 = ret.a3; ++ data->data1 = ret.a4; ++ data->data2 = ret.a5; ++ data->data3 = ret.a6; ++ data->data4 = ret.a7; ++ return 0; ++ } ++ ++ return -EINVAL; ++} ++ ++static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz, ++ u32 frag_len, u32 len, u64 *handle) ++{ ++ ffa_value_t ret; ++ ++ invoke_ffa_fn((ffa_value_t){ ++ .a0 = func_id, .a1 = len, .a2 = frag_len, ++ .a3 = buf, .a4 = buf_sz, ++ }, &ret); ++ ++ while (ret.a0 == FFA_MEM_OP_PAUSE) ++ invoke_ffa_fn((ffa_value_t){ ++ .a0 = FFA_MEM_OP_RESUME, ++ .a1 = ret.a1, .a2 = ret.a2, ++ }, &ret); ++ ++ if (ret.a0 == FFA_ERROR) ++ return ffa_to_linux_errno((int)ret.a2); ++ ++ if (ret.a0 != FFA_SUCCESS) ++ return -EOPNOTSUPP; ++ ++ 
if (handle) ++ *handle = PACK_HANDLE(ret.a2, ret.a3); ++ ++ return frag_len; ++} ++ ++static int ffa_mem_next_frag(u64 handle, u32 frag_len) ++{ ++ ffa_value_t ret; ++ ++ invoke_ffa_fn((ffa_value_t){ ++ .a0 = FFA_MEM_FRAG_TX, ++ .a1 = HANDLE_LOW(handle), .a2 = HANDLE_HIGH(handle), ++ .a3 = frag_len, ++ }, &ret); ++ ++ while (ret.a0 == FFA_MEM_OP_PAUSE) ++ invoke_ffa_fn((ffa_value_t){ ++ .a0 = FFA_MEM_OP_RESUME, ++ .a1 = ret.a1, .a2 = ret.a2, ++ }, &ret); ++ ++ if (ret.a0 == FFA_ERROR) ++ return ffa_to_linux_errno((int)ret.a2); ++ ++ if (ret.a0 != FFA_MEM_FRAG_RX) ++ return -EOPNOTSUPP; ++ ++ return ret.a3; ++} ++ ++static int ++ffa_transmit_fragment(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len, ++ u32 len, u64 *handle, bool first) ++{ ++ if (!first) ++ return ffa_mem_next_frag(*handle, frag_len); ++ ++ return ffa_mem_first_frag(func_id, buf, buf_sz, frag_len, len, handle); ++} ++ ++static u32 ffa_get_num_pages_sg(struct scatterlist *sg) ++{ ++ u32 num_pages = 0; ++ ++ do { ++ num_pages += sg->length / FFA_PAGE_SIZE; ++ } while ((sg = sg_next(sg))); ++ ++ return num_pages; ++} ++ ++static int ++ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize, ++ struct ffa_mem_ops_args *args) ++{ ++ int rc = 0; ++ bool first = true; ++ phys_addr_t addr = 0; ++ struct ffa_composite_mem_region *composite; ++ struct ffa_mem_region_addr_range *constituents; ++ struct ffa_mem_region_attributes *ep_mem_access; ++ struct ffa_mem_region *mem_region = buffer; ++ u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg); ++ ++ mem_region->tag = args->tag; ++ mem_region->flags = args->flags; ++ mem_region->sender_id = drv_info->vm_id; ++ mem_region->attributes = FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | ++ FFA_MEM_INNER_SHAREABLE; ++ ep_mem_access = &mem_region->ep_mem_access[0]; ++ ++ for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) { ++ ep_mem_access->receiver = args->attrs[idx].receiver; ++ ep_mem_access->attrs = args->attrs[idx].attrs; ++ 
ep_mem_access->composite_off = COMPOSITE_OFFSET(args->nattrs); ++ } ++ mem_region->ep_count = args->nattrs; ++ ++ composite = buffer + COMPOSITE_OFFSET(args->nattrs); ++ composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg); ++ composite->addr_range_cnt = num_entries; ++ ++ length = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, num_entries); ++ frag_len = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, 0); ++ if (frag_len > max_fragsize) ++ return -ENXIO; ++ ++ if (!args->use_txbuf) { ++ addr = virt_to_phys(buffer); ++ buf_sz = max_fragsize / FFA_PAGE_SIZE; ++ } ++ ++ constituents = buffer + frag_len; ++ idx = 0; ++ do { ++ if (frag_len == max_fragsize) { ++ rc = ffa_transmit_fragment(func_id, addr, buf_sz, ++ frag_len, length, ++ &args->g_handle, first); ++ if (rc < 0) ++ return -ENXIO; ++ ++ first = false; ++ idx = 0; ++ frag_len = 0; ++ constituents = buffer; ++ } ++ ++ if ((void *)constituents - buffer > max_fragsize) { ++ pr_err("Memory Region Fragment > Tx Buffer size\n"); ++ return -EFAULT; ++ } ++ ++ constituents->address = sg_phys(args->sg); ++ constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE; ++ constituents++; ++ frag_len += sizeof(struct ffa_mem_region_addr_range); ++ } while ((args->sg = sg_next(args->sg))); ++ ++ return ffa_transmit_fragment(func_id, addr, buf_sz, frag_len, ++ length, &args->g_handle, first); ++} ++ ++static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args) ++{ ++ int ret; ++ void *buffer; ++ ++ if (!args->use_txbuf) { ++ buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL); ++ if (!buffer) ++ return -ENOMEM; ++ } else { ++ buffer = drv_info->tx_buffer; ++ mutex_lock(&drv_info->tx_lock); ++ } ++ ++ ret = ffa_setup_and_transmit(func_id, buffer, RXTX_BUFFER_SIZE, args); ++ ++ if (args->use_txbuf) ++ mutex_unlock(&drv_info->tx_lock); ++ else ++ free_pages_exact(buffer, RXTX_BUFFER_SIZE); ++ ++ return ret < 0 ? 
ret : 0; ++} ++ ++static int ffa_memory_reclaim(u64 g_handle, u32 flags) ++{ ++ ffa_value_t ret; ++ ++ invoke_ffa_fn((ffa_value_t){ ++ .a0 = FFA_MEM_RECLAIM, ++ .a1 = HANDLE_LOW(g_handle), .a2 = HANDLE_HIGH(g_handle), ++ .a3 = flags, ++ }, &ret); ++ ++ if (ret.a0 == FFA_ERROR) ++ return ffa_to_linux_errno((int)ret.a2); ++ ++ return 0; ++} ++ ++static u32 ffa_api_version_get(void) ++{ ++ return drv_info->version; ++} ++ ++static int ffa_partition_info_get(const char *uuid_str, ++ struct ffa_partition_info *buffer) ++{ ++ int count; ++ uuid_t uuid; ++ struct ffa_partition_info *pbuf; ++ ++ if (uuid_parse(uuid_str, &uuid)) { ++ pr_err("invalid uuid (%s)\n", uuid_str); ++ return -ENODEV; ++ } ++ ++ count = ffa_partition_probe(&uuid_null, &pbuf); ++ if (count <= 0) ++ return -ENOENT; ++ ++ memcpy(buffer, pbuf, sizeof(*pbuf) * count); ++ kfree(pbuf); ++ return 0; ++} ++ ++static void ffa_mode_32bit_set(struct ffa_device *dev) ++{ ++ dev->mode_32bit = true; ++} ++ ++static int ffa_sync_send_receive(struct ffa_device *dev, ++ struct ffa_send_direct_data *data) ++{ ++ return ffa_msg_send_direct_req(drv_info->vm_id, dev->vm_id, ++ dev->mode_32bit, data); ++} ++ ++static int ++ffa_memory_share(struct ffa_device *dev, struct ffa_mem_ops_args *args) ++{ ++ if (dev->mode_32bit) ++ return ffa_memory_ops(FFA_MEM_SHARE, args); ++ ++ return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args); ++} ++ ++static int ++ffa_memory_lend(struct ffa_device *dev, struct ffa_mem_ops_args *args) ++{ ++ /* Note that upon a successful MEM_LEND request the caller ++ * must ensure that the memory region specified is not accessed ++ * until a successful MEM_RECALIM call has been made. ++ * On systems with a hypervisor present this will been enforced, ++ * however on systems without a hypervisor the responsibility ++ * falls to the calling kernel driver to prevent access. 
++ */ ++ if (dev->mode_32bit) ++ return ffa_memory_ops(FFA_MEM_LEND, args); ++ ++ return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args); ++} ++ ++static const struct ffa_dev_ops ffa_ops = { ++ .api_version_get = ffa_api_version_get, ++ .partition_info_get = ffa_partition_info_get, ++ .mode_32bit_set = ffa_mode_32bit_set, ++ .sync_send_receive = ffa_sync_send_receive, ++ .memory_reclaim = ffa_memory_reclaim, ++ .memory_share = ffa_memory_share, ++ .memory_lend = ffa_memory_lend, ++}; ++ ++const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev) ++{ ++ if (ffa_device_is_valid(dev)) ++ return &ffa_ops; ++ ++ return NULL; ++} ++EXPORT_SYMBOL_GPL(ffa_dev_ops_get); ++ ++void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid) ++{ ++ int count, idx; ++ struct ffa_partition_info *pbuf, *tpbuf; ++ ++ count = ffa_partition_probe(uuid, &pbuf); ++ if (count <= 0) ++ return; ++ ++ for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) ++ if (tpbuf->id == ffa_dev->vm_id) ++ uuid_copy(&ffa_dev->uuid, uuid); ++ kfree(pbuf); ++} ++ ++static void ffa_setup_partitions(void) ++{ ++ int count, idx; ++ struct ffa_device *ffa_dev; ++ struct ffa_partition_info *pbuf, *tpbuf; ++ ++ count = ffa_partition_probe(&uuid_null, &pbuf); ++ if (count <= 0) { ++ pr_info("%s: No partitions found, error %d\n", __func__, count); ++ return; ++ } ++ ++ for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) { ++ /* Note that the &uuid_null parameter will require ++ * ffa_device_match() to find the UUID of this partition id ++ * with help of ffa_device_match_uuid(). Once the FF-A spec ++ * is updated to provide correct UUID here for each partition ++ * as part of the discovery API, we need to pass the ++ * discovered UUID here instead. 
++ */ ++ ffa_dev = ffa_device_register(&uuid_null, tpbuf->id); ++ if (!ffa_dev) { ++ pr_err("%s: failed to register partition ID 0x%x\n", ++ __func__, tpbuf->id); ++ continue; ++ } ++ ++ ffa_dev_set_drvdata(ffa_dev, drv_info); ++ } ++ kfree(pbuf); ++} ++ ++static int __init ffa_init(void) ++{ ++ int ret; ++ ++ ret = ffa_transport_init(&invoke_ffa_fn); ++ if (ret) ++ return ret; ++ ++ ret = arm_ffa_bus_init(); ++ if (ret) ++ return ret; ++ ++ drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL); ++ if (!drv_info) { ++ ret = -ENOMEM; ++ goto ffa_bus_exit; ++ } ++ ++ ret = ffa_version_check(&drv_info->version); ++ if (ret) ++ goto free_drv_info; ++ ++ if (ffa_id_get(&drv_info->vm_id)) { ++ pr_err("failed to obtain VM id for self\n"); ++ ret = -ENODEV; ++ goto free_drv_info; ++ } ++ ++ drv_info->rx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL); ++ if (!drv_info->rx_buffer) { ++ ret = -ENOMEM; ++ goto free_pages; ++ } ++ ++ drv_info->tx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL); ++ if (!drv_info->tx_buffer) { ++ ret = -ENOMEM; ++ goto free_pages; ++ } ++ ++ ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer), ++ virt_to_phys(drv_info->rx_buffer), ++ RXTX_BUFFER_SIZE / FFA_PAGE_SIZE); ++ if (ret) { ++ pr_err("failed to register FFA RxTx buffers\n"); ++ goto free_pages; ++ } ++ ++ mutex_init(&drv_info->rx_lock); ++ mutex_init(&drv_info->tx_lock); ++ ++ ffa_setup_partitions(); ++ ++ return 0; ++free_pages: ++ if (drv_info->tx_buffer) ++ free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE); ++ free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE); ++free_drv_info: ++ kfree(drv_info); ++ffa_bus_exit: ++ arm_ffa_bus_exit(); ++ return ret; ++} ++subsys_initcall(ffa_init); ++ ++static void __exit ffa_exit(void) ++{ ++ ffa_rxtx_unmap(drv_info->vm_id); ++ free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE); ++ free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE); ++ kfree(drv_info); ++ arm_ffa_bus_exit(); ++} ++module_exit(ffa_exit); ++ 
++MODULE_ALIAS("arm-ffa"); ++MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); ++MODULE_DESCRIPTION("Arm FF-A interface driver"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/firmware/arm_ffa/smccc.c b/drivers/firmware/arm_ffa/smccc.c +new file mode 100644 +index 000000000000..4d85bfff0a4e +--- /dev/null ++++ b/drivers/firmware/arm_ffa/smccc.c +@@ -0,0 +1,39 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2021 ARM Ltd. ++ */ ++ ++#include <linux/printk.h> ++ ++#include "common.h" ++ ++static void __arm_ffa_fn_smc(ffa_value_t args, ffa_value_t *res) ++{ ++ arm_smccc_1_2_smc(&args, res); ++} ++ ++static void __arm_ffa_fn_hvc(ffa_value_t args, ffa_value_t *res) ++{ ++ arm_smccc_1_2_hvc(&args, res); ++} ++ ++int __init ffa_transport_init(ffa_fn **invoke_ffa_fn) ++{ ++ enum arm_smccc_conduit conduit; ++ ++ if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2) ++ return -EOPNOTSUPP; ++ ++ conduit = arm_smccc_1_1_get_conduit(); ++ if (conduit == SMCCC_CONDUIT_NONE) { ++ pr_err("%s: invalid SMCCC conduit\n", __func__); ++ return -EOPNOTSUPP; ++ } ++ ++ if (conduit == SMCCC_CONDUIT_SMC) ++ *invoke_ffa_fn = __arm_ffa_fn_smc; ++ else ++ *invoke_ffa_fn = __arm_ffa_fn_hvc; ++ ++ return 0; ++} +diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h +index 62c54234576c..c8eb24af3c62 100644 +--- a/include/linux/arm-smccc.h ++++ b/include/linux/arm-smccc.h +@@ -186,6 +186,61 @@ struct arm_smccc_res { + unsigned long a3; + }; + ++#ifdef CONFIG_ARM64 ++/** ++ * struct arm_smccc_1_2_regs - Arguments for or Results from SMC/HVC call ++ * @a0-a17 argument values from registers 0 to 17 ++ */ ++struct arm_smccc_1_2_regs { ++ unsigned long a0; ++ unsigned long a1; ++ unsigned long a2; ++ unsigned long a3; ++ unsigned long a4; ++ unsigned long a5; ++ unsigned long a6; ++ unsigned long a7; ++ unsigned long a8; ++ unsigned long a9; ++ unsigned long a10; ++ unsigned long a11; ++ unsigned long a12; ++ unsigned long a13; ++ unsigned long a14; ++ unsigned 
long a15; ++ unsigned long a16; ++ unsigned long a17; ++}; ++ ++/** ++ * arm_smccc_1_2_hvc() - make HVC calls ++ * @args: arguments passed via struct arm_smccc_1_2_regs ++ * @res: result values via struct arm_smccc_1_2_regs ++ * ++ * This function is used to make HVC calls following SMC Calling Convention ++ * v1.2 or above. The content of the supplied param are copied from the ++ * structure to registers prior to the HVC instruction. The return values ++ * are updated with the content from registers on return from the HVC ++ * instruction. ++ */ ++asmlinkage void arm_smccc_1_2_hvc(const struct arm_smccc_1_2_regs *args, ++ struct arm_smccc_1_2_regs *res); ++ ++/** ++ * arm_smccc_1_2_smc() - make SMC calls ++ * @args: arguments passed via struct arm_smccc_1_2_regs ++ * @res: result values via struct arm_smccc_1_2_regs ++ * ++ * This function is used to make SMC calls following SMC Calling Convention ++ * v1.2 or above. The content of the supplied param are copied from the ++ * structure to registers prior to the SMC instruction. The return values ++ * are updated with the content from registers on return from the SMC ++ * instruction. ++ */ ++asmlinkage void arm_smccc_1_2_smc(const struct arm_smccc_1_2_regs *args, ++ struct arm_smccc_1_2_regs *res); ++#endif ++ + /** + * struct arm_smccc_quirk - Contains quirk information + * @id: quirk identification +diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h +new file mode 100644 +index 000000000000..85651e41ded8 +--- /dev/null ++++ b/include/linux/arm_ffa.h +@@ -0,0 +1,269 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright (C) 2021 ARM Ltd. 
++ */ ++ ++#ifndef _LINUX_ARM_FFA_H ++#define _LINUX_ARM_FFA_H ++ ++#include <linux/device.h> ++#include <linux/module.h> ++#include <linux/types.h> ++#include <linux/uuid.h> ++ ++/* FFA Bus/Device/Driver related */ ++struct ffa_device { ++ int vm_id; ++ bool mode_32bit; ++ uuid_t uuid; ++ struct device dev; ++}; ++ ++#define to_ffa_dev(d) container_of(d, struct ffa_device, dev) ++ ++struct ffa_device_id { ++ uuid_t uuid; ++}; ++ ++struct ffa_driver { ++ const char *name; ++ int (*probe)(struct ffa_device *sdev); ++ void (*remove)(struct ffa_device *sdev); ++ const struct ffa_device_id *id_table; ++ ++ struct device_driver driver; ++}; ++ ++#define to_ffa_driver(d) container_of(d, struct ffa_driver, driver) ++ ++static inline void ffa_dev_set_drvdata(struct ffa_device *fdev, void *data) ++{ ++ fdev->dev.driver_data = data; ++} ++ ++#if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT) ++struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id); ++void ffa_device_unregister(struct ffa_device *ffa_dev); ++int ffa_driver_register(struct ffa_driver *driver, struct module *owner, ++ const char *mod_name); ++void ffa_driver_unregister(struct ffa_driver *driver); ++bool ffa_device_is_valid(struct ffa_device *ffa_dev); ++const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev); ++ ++#else ++static inline ++struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id) ++{ ++ return NULL; ++} ++ ++static inline void ffa_device_unregister(struct ffa_device *dev) {} ++ ++static inline int ++ffa_driver_register(struct ffa_driver *driver, struct module *owner, ++ const char *mod_name) ++{ ++ return -EINVAL; ++} ++ ++static inline void ffa_driver_unregister(struct ffa_driver *driver) {} ++ ++static inline ++bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; } ++ ++static inline ++const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev) ++{ ++ return NULL; ++} ++#endif /* CONFIG_ARM_FFA_TRANSPORT */ ++ ++#define ffa_register(driver) \ 
++ ffa_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) ++#define ffa_unregister(driver) \ ++ ffa_driver_unregister(driver) ++ ++/** ++ * module_ffa_driver() - Helper macro for registering a psa_ffa driver ++ * @__ffa_driver: ffa_driver structure ++ * ++ * Helper macro for psa_ffa drivers to set up proper module init / exit ++ * functions. Replaces module_init() and module_exit() and keeps people from ++ * printing pointless things to the kernel log when their driver is loaded. ++ */ ++#define module_ffa_driver(__ffa_driver) \ ++ module_driver(__ffa_driver, ffa_register, ffa_unregister) ++ ++/* FFA transport related */ ++struct ffa_partition_info { ++ u16 id; ++ u16 exec_ctxt; ++/* partition supports receipt of direct requests */ ++#define FFA_PARTITION_DIRECT_RECV BIT(0) ++/* partition can send direct requests. */ ++#define FFA_PARTITION_DIRECT_SEND BIT(1) ++/* partition can send and receive indirect messages. */ ++#define FFA_PARTITION_INDIRECT_MSG BIT(2) ++ u32 properties; ++}; ++ ++/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */ ++struct ffa_send_direct_data { ++ unsigned long data0; /* w3/x3 */ ++ unsigned long data1; /* w4/x4 */ ++ unsigned long data2; /* w5/x5 */ ++ unsigned long data3; /* w6/x6 */ ++ unsigned long data4; /* w7/x7 */ ++}; ++ ++struct ffa_mem_region_addr_range { ++ /* The base IPA of the constituent memory region, aligned to 4 kiB */ ++ u64 address; ++ /* The number of 4 kiB pages in the constituent memory region. */ ++ u32 pg_cnt; ++ u32 reserved; ++}; ++ ++struct ffa_composite_mem_region { ++ /* ++ * The total number of 4 kiB pages included in this memory region. This ++ * must be equal to the sum of page counts specified in each ++ * `struct ffa_mem_region_addr_range`. ++ */ ++ u32 total_pg_cnt; ++ /* The number of constituents included in this memory region range */ ++ u32 addr_range_cnt; ++ u64 reserved; ++ /** An array of `addr_range_cnt` memory region constituents. 
*/ ++ struct ffa_mem_region_addr_range constituents[]; ++}; ++ ++struct ffa_mem_region_attributes { ++ /* The ID of the VM to which the memory is being given or shared. */ ++ u16 receiver; ++ /* ++ * The permissions with which the memory region should be mapped in the ++ * receiver's page table. ++ */ ++#define FFA_MEM_EXEC BIT(3) ++#define FFA_MEM_NO_EXEC BIT(2) ++#define FFA_MEM_RW BIT(1) ++#define FFA_MEM_RO BIT(0) ++ u8 attrs; ++ /* ++ * Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP ++ * for memory regions with multiple borrowers. ++ */ ++#define FFA_MEM_RETRIEVE_SELF_BORROWER BIT(0) ++ u8 flag; ++ u32 composite_off; ++ /* ++ * Offset in bytes from the start of the outer `ffa_memory_region` to ++ * an `struct ffa_mem_region_addr_range`. ++ */ ++ u64 reserved; ++}; ++ ++struct ffa_mem_region { ++ /* The ID of the VM/owner which originally sent the memory region */ ++ u16 sender_id; ++#define FFA_MEM_NORMAL BIT(5) ++#define FFA_MEM_DEVICE BIT(4) ++ ++#define FFA_MEM_WRITE_BACK (3 << 2) ++#define FFA_MEM_NON_CACHEABLE (1 << 2) ++ ++#define FFA_DEV_nGnRnE (0 << 2) ++#define FFA_DEV_nGnRE (1 << 2) ++#define FFA_DEV_nGRE (2 << 2) ++#define FFA_DEV_GRE (3 << 2) ++ ++#define FFA_MEM_NON_SHAREABLE (0) ++#define FFA_MEM_OUTER_SHAREABLE (2) ++#define FFA_MEM_INNER_SHAREABLE (3) ++ u8 attributes; ++ u8 reserved_0; ++/* ++ * Clear memory region contents after unmapping it from the sender and ++ * before mapping it for any receiver. ++ */ ++#define FFA_MEM_CLEAR BIT(0) ++/* ++ * Whether the hypervisor may time slice the memory sharing or retrieval ++ * operation. ++ */ ++#define FFA_TIME_SLICE_ENABLE BIT(1) ++ ++#define FFA_MEM_RETRIEVE_TYPE_IN_RESP (0 << 3) ++#define FFA_MEM_RETRIEVE_TYPE_SHARE (1 << 3) ++#define FFA_MEM_RETRIEVE_TYPE_LEND (2 << 3) ++#define FFA_MEM_RETRIEVE_TYPE_DONATE (3 << 3) ++ ++#define FFA_MEM_RETRIEVE_ADDR_ALIGN_HINT BIT(9) ++#define FFA_MEM_RETRIEVE_ADDR_ALIGN(x) ((x) << 5) ++ /* Flags to control behaviour of the transaction. 
*/ ++ u32 flags; ++#define HANDLE_LOW_MASK GENMASK_ULL(31, 0) ++#define HANDLE_HIGH_MASK GENMASK_ULL(63, 32) ++#define HANDLE_LOW(x) ((u32)(FIELD_GET(HANDLE_LOW_MASK, (x)))) ++#define HANDLE_HIGH(x) ((u32)(FIELD_GET(HANDLE_HIGH_MASK, (x)))) ++ ++#define PACK_HANDLE(l, h) \ ++ (FIELD_PREP(HANDLE_LOW_MASK, (l)) | FIELD_PREP(HANDLE_HIGH_MASK, (h))) ++ /* ++ * A globally-unique ID assigned by the hypervisor for a region ++ * of memory being sent between VMs. ++ */ ++ u64 handle; ++ /* ++ * An implementation defined value associated with the receiver and the ++ * memory region. ++ */ ++ u64 tag; ++ u32 reserved_1; ++ /* ++ * The number of `ffa_mem_region_attributes` entries included in this ++ * transaction. ++ */ ++ u32 ep_count; ++ /* ++ * An array of endpoint memory access descriptors. ++ * Each one specifies a memory region offset, an endpoint and the ++ * attributes with which this memory region should be mapped in that ++ * endpoint's page table. ++ */ ++ struct ffa_mem_region_attributes ep_mem_access[]; ++}; ++ ++#define COMPOSITE_OFFSET(x) \ ++ (offsetof(struct ffa_mem_region, ep_mem_access[x])) ++#define CONSTITUENTS_OFFSET(x) \ ++ (offsetof(struct ffa_composite_mem_region, constituents[x])) ++#define COMPOSITE_CONSTITUENTS_OFFSET(x, y) \ ++ (COMPOSITE_OFFSET(x) + CONSTITUENTS_OFFSET(y)) ++ ++struct ffa_mem_ops_args { ++ bool use_txbuf; ++ u32 nattrs; ++ u32 flags; ++ u64 tag; ++ u64 g_handle; ++ struct scatterlist *sg; ++ struct ffa_mem_region_attributes *attrs; ++}; ++ ++struct ffa_dev_ops { ++ u32 (*api_version_get)(void); ++ int (*partition_info_get)(const char *uuid_str, ++ struct ffa_partition_info *buffer); ++ void (*mode_32bit_set)(struct ffa_device *dev); ++ int (*sync_send_receive)(struct ffa_device *dev, ++ struct ffa_send_direct_data *data); ++ int (*memory_reclaim)(u64 g_handle, u32 flags); ++ int (*memory_share)(struct ffa_device *dev, ++ struct ffa_mem_ops_args *args); ++ int (*memory_lend)(struct ffa_device *dev, ++ struct ffa_mem_ops_args 
*args); ++}; ++ ++#endif /* _LINUX_ARM_FFA_H */ +-- +2.30.2 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0009-tee-add-sec_world_id-to-struct-tee_shm.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0009-tee-add-sec_world_id-to-struct-tee_shm.patch new file mode 100644 index 0000000000..eab6527129 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0009-tee-add-sec_world_id-to-struct-tee_shm.patch @@ -0,0 +1,44 @@ +From 812d2a649a9cc2a0004cbde2b3e411b46ec84af4 Mon Sep 17 00:00:00 2001 +From: Jens Wiklander <jens.wiklander@linaro.org> +Date: Thu, 25 Mar 2021 15:08:44 +0100 +Subject: [PATCH 14/22] tee: add sec_world_id to struct tee_shm + +Adds sec_world_id to struct tee_shm which describes a shared memory +object. sec_world_id can be used by a driver to store an id assigned by +secure world. + +Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org> + +Upstream-Status: Pending [Not submitted to upstream yet] +--- + include/linux/tee_drv.h | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h +index cdd049a724b1..93d836fded8b 100644 +--- a/include/linux/tee_drv.h ++++ b/include/linux/tee_drv.h +@@ -196,7 +196,11 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method, + * @num_pages: number of locked pages + * @dmabuf: dmabuf used to for exporting to user space + * @flags: defined by TEE_SHM_* in tee_drv.h +- * @id: unique id of a shared memory object on this device ++ * @id: unique id of a shared memory object on this device, shared ++ * with user space ++ * @sec_world_id: ++ * secure world assigned id of this shared memory object, not ++ * used by all drivers + * + * This pool is only supposed to be accessed directly from the TEE + * subsystem and from drivers that implements their own shm pool manager. 
+@@ -212,6 +216,7 @@ struct tee_shm { + struct dma_buf *dmabuf; + u32 flags; + int id; ++ u64 sec_world_id; + }; + + /** +-- +2.17.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0010-optee-simplify-optee_release.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0010-optee-simplify-optee_release.patch new file mode 100644 index 0000000000..94973c77b8 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0010-optee-simplify-optee_release.patch @@ -0,0 +1,179 @@ +From cb4f6a55b9c61a82a65edcd4b18c505d92480710 Mon Sep 17 00:00:00 2001 +From: Jens Wiklander <jens.wiklander@linaro.org> +Date: Thu, 25 Mar 2021 15:08:46 +0100 +Subject: [PATCH 15/22] optee: simplify optee_release() + +Simplifies optee_release() with a new helper function, +optee_close_session_helper() which has been factored out from +optee_close_session(). + +A separate optee_release_supp() is added for the supplicant device. + +Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org> + +Upstream-Status: Pending [Not submitted to upstream yet] +--- + drivers/tee/optee/call.c | 31 ++++++++++------- + drivers/tee/optee/core.c | 55 +++++++++++-------------------- + drivers/tee/optee/optee_private.h | 1 + + 3 files changed, 39 insertions(+), 48 deletions(-) + +diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c +index 0790de29f0ca..1b339b743ff5 100644 +--- a/drivers/tee/optee/call.c ++++ b/drivers/tee/optee/call.c +@@ -285,12 +285,28 @@ int optee_open_session(struct tee_context *ctx, + return rc; + } + +-int optee_close_session(struct tee_context *ctx, u32 session) ++int optee_close_session_helper(struct tee_context *ctx, u32 session) + { +- struct optee_context_data *ctxdata = ctx->data; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; ++ ++ shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg); ++ if (IS_ERR(shm)) ++ return PTR_ERR(shm); ++ ++ msg_arg->cmd = 
OPTEE_MSG_CMD_CLOSE_SESSION; ++ msg_arg->session = session; ++ optee_do_call_with_arg(ctx, msg_parg); ++ ++ tee_shm_free(shm); ++ ++ return 0; ++} ++ ++int optee_close_session(struct tee_context *ctx, u32 session) ++{ ++ struct optee_context_data *ctxdata = ctx->data; + struct optee_session *sess; + + /* Check that the session is valid and remove it from the list */ +@@ -303,16 +319,7 @@ int optee_close_session(struct tee_context *ctx, u32 session) + return -EINVAL; + kfree(sess); + +- shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg); +- if (IS_ERR(shm)) +- return PTR_ERR(shm); +- +- msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION; +- msg_arg->session = session; +- optee_do_call_with_arg(ctx, msg_parg); +- +- tee_shm_free(shm); +- return 0; ++ return optee_close_session_helper(ctx, session); + } + + int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, +diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c +index 63542c1cc291..e39c6d290d83 100644 +--- a/drivers/tee/optee/core.c ++++ b/drivers/tee/optee/core.c +@@ -263,59 +263,42 @@ static int optee_open(struct tee_context *ctx) + return 0; + } + +-static void optee_release(struct tee_context *ctx) ++static void optee_release_helper(struct tee_context *ctx, ++ int (*close_session)(struct tee_context *ctx, ++ u32 session)) + { + struct optee_context_data *ctxdata = ctx->data; +- struct tee_device *teedev = ctx->teedev; +- struct optee *optee = tee_get_drvdata(teedev); +- struct tee_shm *shm; +- struct optee_msg_arg *arg = NULL; +- phys_addr_t parg; + struct optee_session *sess; + struct optee_session *sess_tmp; + + if (!ctxdata) + return; + +- shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED); +- if (!IS_ERR(shm)) { +- arg = tee_shm_get_va(shm, 0); +- /* +- * If va2pa fails for some reason, we can't call into +- * secure world, only free the memory. 
Secure OS will leak +- * sessions and finally refuse more sessions, but we will +- * at least let normal world reclaim its memory. +- */ +- if (!IS_ERR(arg)) +- if (tee_shm_va2pa(shm, arg, &parg)) +- arg = NULL; /* prevent usage of parg below */ +- } +- + list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list, + list_node) { + list_del(&sess->list_node); +- if (!IS_ERR_OR_NULL(arg)) { +- memset(arg, 0, sizeof(*arg)); +- arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION; +- arg->session = sess->session_id; +- optee_do_call_with_arg(ctx, parg); +- } ++ close_session(ctx, sess->session_id); + kfree(sess); + } + kfree(ctxdata); ++ ctx->data = NULL; ++} + +- if (!IS_ERR(shm)) +- tee_shm_free(shm); ++static void optee_release(struct tee_context *ctx) ++{ ++ optee_release_helper(ctx, optee_close_session_helper); ++} + +- ctx->data = NULL; ++static void optee_release_supp(struct tee_context *ctx) ++{ ++ struct optee *optee = tee_get_drvdata(ctx->teedev); + +- if (teedev == optee->supp_teedev) { +- if (optee->scan_bus_wq) { +- destroy_workqueue(optee->scan_bus_wq); +- optee->scan_bus_wq = NULL; +- } +- optee_supp_release(&optee->supp); ++ optee_release_helper(ctx, optee_close_session_helper); ++ if (optee->scan_bus_wq) { ++ destroy_workqueue(optee->scan_bus_wq); ++ optee->scan_bus_wq = NULL; + } ++ optee_supp_release(&optee->supp); + } + + static const struct tee_driver_ops optee_ops = { +@@ -339,7 +322,7 @@ static const struct tee_desc optee_desc = { + static const struct tee_driver_ops optee_supp_ops = { + .get_version = optee_get_version, + .open = optee_open, +- .release = optee_release, ++ .release = optee_release_supp, + .supp_recv = optee_supp_recv, + .supp_send = optee_supp_send, + .shm_register = optee_shm_register_supp, +diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h +index e25b216a14ef..2b63b796645e 100644 +--- a/drivers/tee/optee/optee_private.h ++++ b/drivers/tee/optee/optee_private.h +@@ -152,6 +152,7 @@ u32 
optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg); + int optee_open_session(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param); ++int optee_close_session_helper(struct tee_context *ctx, u32 session); + int optee_close_session(struct tee_context *ctx, u32 session); + int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, + struct tee_param *param); +-- +2.17.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0011-optee-sync-OP-TEE-headers.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0011-optee-sync-OP-TEE-headers.patch new file mode 100644 index 0000000000..5e3e868888 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0011-optee-sync-OP-TEE-headers.patch @@ -0,0 +1,644 @@ +From 1e43bd55c951da0610230c4f28a8ebdd13b30733 Mon Sep 17 00:00:00 2001 +From: Jens Wiklander <jens.wiklander@linaro.org> +Date: Wed, 20 Jan 2021 11:14:12 +0100 +Subject: [PATCH 16/22] optee: sync OP-TEE headers + +Pulls in updates in the internal headers from OP-TEE OS [1]. A few +defines has been shortened, hence the changes in rpc.c. Defines not used +by the driver in tee_rpc_cmd.h has been filtered out. + +Note that this does not change the ABI. 
+ +Link: [1] https://github.com/OP-TEE/optee_os +Reviewed-by: Sumit Garg <sumit.garg@linaro.org> +Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org> +Change-Id: I5d20a22a3f38bfc9d232279d5f00505c4d3ba965 + +Upstream-Status: Backport [https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?h=v5.13-rc7&id=617d8e8b347edcee6da38df0aeb671fc9c9ba19c] +--- + drivers/tee/optee/optee_msg.h | 156 ++---------------------------- + drivers/tee/optee/optee_rpc_cmd.h | 103 ++++++++++++++++++++ + drivers/tee/optee/optee_smc.h | 70 +++++++++----- + drivers/tee/optee/rpc.c | 39 ++++---- + 4 files changed, 179 insertions(+), 189 deletions(-) + create mode 100644 drivers/tee/optee/optee_rpc_cmd.h + +diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h +index c7ac7d02d6cc..5bef6a0165db 100644 +--- a/drivers/tee/optee/optee_msg.h ++++ b/drivers/tee/optee/optee_msg.h +@@ -1,6 +1,6 @@ + /* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ + /* +- * Copyright (c) 2015-2019, Linaro Limited ++ * Copyright (c) 2015-2021, Linaro Limited + */ + #ifndef _OPTEE_MSG_H + #define _OPTEE_MSG_H +@@ -12,11 +12,9 @@ + * This file defines the OP-TEE message protocol (ABI) used to communicate + * with an instance of OP-TEE running in secure world. + * +- * This file is divided into three sections. ++ * This file is divided into two sections. + * 1. Formatting of messages. + * 2. Requests from normal world +- * 3. Requests from secure world, Remote Procedure Call (RPC), handled by +- * tee-supplicant. + */ + + /***************************************************************************** +@@ -54,8 +52,8 @@ + * Every entry in buffer should point to a 4k page beginning (12 least + * significant bits must be equal to zero). + * +- * 12 least significant bints of optee_msg_param.u.tmem.buf_ptr should hold page +- * offset of the user buffer. ++ * 12 least significant bits of optee_msg_param.u.tmem.buf_ptr should hold ++ * page offset of user buffer. 
+ * + * So, entries should be placed like members of this structure: + * +@@ -178,17 +176,9 @@ struct optee_msg_param { + * @params: the parameters supplied to the OS Command + * + * All normal calls to Trusted OS uses this struct. If cmd requires further +- * information than what these field holds it can be passed as a parameter ++ * information than what these fields hold it can be passed as a parameter + * tagged as meta (setting the OPTEE_MSG_ATTR_META bit in corresponding +- * attrs field). All parameters tagged as meta has to come first. +- * +- * Temp memref parameters can be fragmented if supported by the Trusted OS +- * (when optee_smc.h is bearer of this protocol this is indicated with +- * OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM). If a logical memref parameter is +- * fragmented then has all but the last fragment the +- * OPTEE_MSG_ATTR_FRAGMENT bit set in attrs. Even if a memref is fragmented +- * it will still be presented as a single logical memref to the Trusted +- * Application. ++ * attrs field). All parameters tagged as meta have to come first. + */ + struct optee_msg_arg { + u32 cmd; +@@ -292,15 +282,12 @@ struct optee_msg_arg { + * OPTEE_MSG_CMD_REGISTER_SHM registers a shared memory reference. The + * information is passed as: + * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_TMEM_INPUT +- * [| OPTEE_MSG_ATTR_FRAGMENT] ++ * [| OPTEE_MSG_ATTR_NONCONTIG] + * [in] param[0].u.tmem.buf_ptr physical address (of first fragment) + * [in] param[0].u.tmem.size size (of first fragment) + * [in] param[0].u.tmem.shm_ref holds shared memory reference +- * ... +- * The shared memory can optionally be fragmented, temp memrefs can follow +- * each other with all but the last with the OPTEE_MSG_ATTR_FRAGMENT bit set. + * +- * OPTEE_MSG_CMD_UNREGISTER_SHM unregisteres a previously registered shared ++ * OPTEE_MSG_CMD_UNREGISTER_SHM unregisters a previously registered shared + * memory reference. 
The information is passed as: + * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + * [in] param[0].u.rmem.shm_ref holds shared memory reference +@@ -315,131 +302,4 @@ struct optee_msg_arg { + #define OPTEE_MSG_CMD_UNREGISTER_SHM 5 + #define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004 + +-/***************************************************************************** +- * Part 3 - Requests from secure world, RPC +- *****************************************************************************/ +- +-/* +- * All RPC is done with a struct optee_msg_arg as bearer of information, +- * struct optee_msg_arg::arg holds values defined by OPTEE_MSG_RPC_CMD_* below +- * +- * RPC communication with tee-supplicant is reversed compared to normal +- * client communication desribed above. The supplicant receives requests +- * and sends responses. +- */ +- +-/* +- * Load a TA into memory, defined in tee-supplicant +- */ +-#define OPTEE_MSG_RPC_CMD_LOAD_TA 0 +- +-/* +- * Reserved +- */ +-#define OPTEE_MSG_RPC_CMD_RPMB 1 +- +-/* +- * File system access, defined in tee-supplicant +- */ +-#define OPTEE_MSG_RPC_CMD_FS 2 +- +-/* +- * Get time +- * +- * Returns number of seconds and nano seconds since the Epoch, +- * 1970-01-01 00:00:00 +0000 (UTC). +- * +- * [out] param[0].u.value.a Number of seconds +- * [out] param[0].u.value.b Number of nano seconds. +- */ +-#define OPTEE_MSG_RPC_CMD_GET_TIME 3 +- +-/* +- * Wait queue primitive, helper for secure world to implement a wait queue. +- * +- * If secure world need to wait for a secure world mutex it issues a sleep +- * request instead of spinning in secure world. Conversely is a wakeup +- * request issued when a secure world mutex with a thread waiting thread is +- * unlocked. 
+- * +- * Waiting on a key +- * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP +- * [in] param[0].u.value.b wait key +- * +- * Waking up a key +- * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP +- * [in] param[0].u.value.b wakeup key +- */ +-#define OPTEE_MSG_RPC_CMD_WAIT_QUEUE 4 +-#define OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP 0 +-#define OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP 1 +- +-/* +- * Suspend execution +- * +- * [in] param[0].value .a number of milliseconds to suspend +- */ +-#define OPTEE_MSG_RPC_CMD_SUSPEND 5 +- +-/* +- * Allocate a piece of shared memory +- * +- * Shared memory can optionally be fragmented, to support that additional +- * spare param entries are allocated to make room for eventual fragments. +- * The spare param entries has .attr = OPTEE_MSG_ATTR_TYPE_NONE when +- * unused. All returned temp memrefs except the last should have the +- * OPTEE_MSG_ATTR_FRAGMENT bit set in the attr field. +- * +- * [in] param[0].u.value.a type of memory one of +- * OPTEE_MSG_RPC_SHM_TYPE_* below +- * [in] param[0].u.value.b requested size +- * [in] param[0].u.value.c required alignment +- * +- * [out] param[0].u.tmem.buf_ptr physical address (of first fragment) +- * [out] param[0].u.tmem.size size (of first fragment) +- * [out] param[0].u.tmem.shm_ref shared memory reference +- * ... 
+- * [out] param[n].u.tmem.buf_ptr physical address +- * [out] param[n].u.tmem.size size +- * [out] param[n].u.tmem.shm_ref shared memory reference (same value +- * as in param[n-1].u.tmem.shm_ref) +- */ +-#define OPTEE_MSG_RPC_CMD_SHM_ALLOC 6 +-/* Memory that can be shared with a non-secure user space application */ +-#define OPTEE_MSG_RPC_SHM_TYPE_APPL 0 +-/* Memory only shared with non-secure kernel */ +-#define OPTEE_MSG_RPC_SHM_TYPE_KERNEL 1 +- +-/* +- * Free shared memory previously allocated with OPTEE_MSG_RPC_CMD_SHM_ALLOC +- * +- * [in] param[0].u.value.a type of memory one of +- * OPTEE_MSG_RPC_SHM_TYPE_* above +- * [in] param[0].u.value.b value of shared memory reference +- * returned in param[0].u.tmem.shm_ref +- * above +- */ +-#define OPTEE_MSG_RPC_CMD_SHM_FREE 7 +- +-/* +- * Access a device on an i2c bus +- * +- * [in] param[0].u.value.a mode: RD(0), WR(1) +- * [in] param[0].u.value.b i2c adapter +- * [in] param[0].u.value.c i2c chip +- * +- * [in] param[1].u.value.a i2c control flags +- * +- * [in/out] memref[2] buffer to exchange the transfer data +- * with the secure world +- * +- * [out] param[3].u.value.a bytes transferred by the driver +- */ +-#define OPTEE_MSG_RPC_CMD_I2C_TRANSFER 21 +-/* I2C master transfer modes */ +-#define OPTEE_MSG_RPC_CMD_I2C_TRANSFER_RD 0 +-#define OPTEE_MSG_RPC_CMD_I2C_TRANSFER_WR 1 +-/* I2C master control flags */ +-#define OPTEE_MSG_RPC_CMD_I2C_FLAGS_TEN_BIT BIT(0) +- + #endif /* _OPTEE_MSG_H */ +diff --git a/drivers/tee/optee/optee_rpc_cmd.h b/drivers/tee/optee/optee_rpc_cmd.h +new file mode 100644 +index 000000000000..b8275140cef8 +--- /dev/null ++++ b/drivers/tee/optee/optee_rpc_cmd.h +@@ -0,0 +1,103 @@ ++/* SPDX-License-Identifier: BSD-2-Clause */ ++/* ++ * Copyright (c) 2016-2021, Linaro Limited ++ */ ++ ++#ifndef __OPTEE_RPC_CMD_H ++#define __OPTEE_RPC_CMD_H ++ ++/* ++ * All RPC is done with a struct optee_msg_arg as bearer of information, ++ * struct optee_msg_arg::arg holds values defined by OPTEE_RPC_CMD_* 
below. ++ * Only the commands handled by the kernel driver are defined here. ++ * ++ * RPC communication with tee-supplicant is reversed compared to normal ++ * client communication described above. The supplicant receives requests ++ * and sends responses. ++ */ ++ ++/* ++ * Get time ++ * ++ * Returns number of seconds and nano seconds since the Epoch, ++ * 1970-01-01 00:00:00 +0000 (UTC). ++ * ++ * [out] value[0].a Number of seconds ++ * [out] value[0].b Number of nano seconds. ++ */ ++#define OPTEE_RPC_CMD_GET_TIME 3 ++ ++/* ++ * Wait queue primitive, helper for secure world to implement a wait queue. ++ * ++ * If secure world needs to wait for a secure world mutex it issues a sleep ++ * request instead of spinning in secure world. Conversely is a wakeup ++ * request issued when a secure world mutex with a thread waiting thread is ++ * unlocked. ++ * ++ * Waiting on a key ++ * [in] value[0].a OPTEE_RPC_WAIT_QUEUE_SLEEP ++ * [in] value[0].b Wait key ++ * ++ * Waking up a key ++ * [in] value[0].a OPTEE_RPC_WAIT_QUEUE_WAKEUP ++ * [in] value[0].b Wakeup key ++ */ ++#define OPTEE_RPC_CMD_WAIT_QUEUE 4 ++#define OPTEE_RPC_WAIT_QUEUE_SLEEP 0 ++#define OPTEE_RPC_WAIT_QUEUE_WAKEUP 1 ++ ++/* ++ * Suspend execution ++ * ++ * [in] value[0].a Number of milliseconds to suspend ++ */ ++#define OPTEE_RPC_CMD_SUSPEND 5 ++ ++/* ++ * Allocate a piece of shared memory ++ * ++ * [in] value[0].a Type of memory one of ++ * OPTEE_RPC_SHM_TYPE_* below ++ * [in] value[0].b Requested size ++ * [in] value[0].c Required alignment ++ * [out] memref[0] Buffer ++ */ ++#define OPTEE_RPC_CMD_SHM_ALLOC 6 ++/* Memory that can be shared with a non-secure user space application */ ++#define OPTEE_RPC_SHM_TYPE_APPL 0 ++/* Memory only shared with non-secure kernel */ ++#define OPTEE_RPC_SHM_TYPE_KERNEL 1 ++ ++/* ++ * Free shared memory previously allocated with OPTEE_RPC_CMD_SHM_ALLOC ++ * ++ * [in] value[0].a Type of memory one of ++ * OPTEE_RPC_SHM_TYPE_* above ++ * [in] value[0].b Value of shared 
memory reference or cookie ++ */ ++#define OPTEE_RPC_CMD_SHM_FREE 7 ++ ++/* ++ * Issue master requests (read and write operations) to an I2C chip. ++ * ++ * [in] value[0].a Transfer mode (OPTEE_RPC_I2C_TRANSFER_*) ++ * [in] value[0].b The I2C bus (a.k.a adapter). ++ * 16 bit field. ++ * [in] value[0].c The I2C chip (a.k.a address). ++ * 16 bit field (either 7 or 10 bit effective). ++ * [in] value[1].a The I2C master control flags (ie, 10 bit address). ++ * 16 bit field. ++ * [in/out] memref[2] Buffer used for data transfers. ++ * [out] value[3].a Number of bytes transferred by the REE. ++ */ ++#define OPTEE_RPC_CMD_I2C_TRANSFER 21 ++ ++/* I2C master transfer modes */ ++#define OPTEE_RPC_I2C_TRANSFER_RD 0 ++#define OPTEE_RPC_I2C_TRANSFER_WR 1 ++ ++/* I2C master control flags */ ++#define OPTEE_RPC_I2C_FLAGS_TEN_BIT BIT(0) ++ ++#endif /*__OPTEE_RPC_CMD_H*/ +diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h +index 777ad54d4c2c..821e1c30c150 100644 +--- a/drivers/tee/optee/optee_smc.h ++++ b/drivers/tee/optee/optee_smc.h +@@ -1,6 +1,6 @@ + /* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ + /* +- * Copyright (c) 2015-2019, Linaro Limited ++ * Copyright (c) 2015-2021, Linaro Limited + */ + #ifndef OPTEE_SMC_H + #define OPTEE_SMC_H +@@ -39,10 +39,10 @@ + /* + * Function specified by SMC Calling convention + * +- * Return one of the following UIDs if using API specified in this file +- * without further extentions: +- * 65cb6b93-af0c-4617-8ed6-644a8d1140f8 +- * see also OPTEE_SMC_UID_* in optee_msg.h ++ * Return the following UID if using API specified in this file ++ * without further extensions: ++ * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b. ++ * see also OPTEE_MSG_UID_* in optee_msg.h + */ + #define OPTEE_SMC_FUNCID_CALLS_UID OPTEE_MSG_FUNCID_CALLS_UID + #define OPTEE_SMC_CALLS_UID \ +@@ -53,7 +53,7 @@ + /* + * Function specified by SMC Calling convention + * +- * Returns 2.0 if using API specified in this file without further extentions. 
++ * Returns 2.0 if using API specified in this file without further extensions. + * see also OPTEE_MSG_REVISION_* in optee_msg.h + */ + #define OPTEE_SMC_FUNCID_CALLS_REVISION OPTEE_MSG_FUNCID_CALLS_REVISION +@@ -109,8 +109,8 @@ struct optee_smc_call_get_os_revision_result { + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG +- * a1 Upper 32bit of a 64bit physical pointer to a struct optee_msg_arg +- * a2 Lower 32bit of a 64bit physical pointer to a struct optee_msg_arg ++ * a1 Upper 32 bits of a 64-bit physical pointer to a struct optee_msg_arg ++ * a2 Lower 32 bits of a 64-bit physical pointer to a struct optee_msg_arg + * a3 Cache settings, not used if physical pointer is in a predefined shared + * memory area else per OPTEE_SMC_SHM_* + * a4-6 Not used +@@ -214,8 +214,9 @@ struct optee_smc_get_shm_config_result { + * secure world accepts command buffers located in any parts of non-secure RAM + */ + #define OPTEE_SMC_SEC_CAP_DYNAMIC_SHM BIT(2) +- +-/* Secure world supports Shared Memory with a NULL buffer reference */ ++/* Secure world is built with virtualization support */ ++#define OPTEE_SMC_SEC_CAP_VIRTUALIZATION BIT(3) ++/* Secure world supports Shared Memory with a NULL reference */ + #define OPTEE_SMC_SEC_CAP_MEMREF_NULL BIT(4) + + #define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9 +@@ -245,8 +246,8 @@ struct optee_smc_exchange_capabilities_result { + * + * Normal return register usage: + * a0 OPTEE_SMC_RETURN_OK +- * a1 Upper 32bit of a 64bit Shared memory cookie +- * a2 Lower 32bit of a 64bit Shared memory cookie ++ * a1 Upper 32 bits of a 64-bit Shared memory cookie ++ * a2 Lower 32 bits of a 64-bit Shared memory cookie + * a3-7 Preserved + * + * Cache empty return register usage: +@@ -293,6 +294,31 @@ struct optee_smc_disable_shm_cache_result { + #define OPTEE_SMC_ENABLE_SHM_CACHE \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE) + ++/* ++ * Query OP-TEE about number of supported threads ++ * ++ * Normal World OS 
or Hypervisor issues this call to find out how many ++ * threads OP-TEE supports. That is how many standard calls can be issued ++ * in parallel before OP-TEE will return OPTEE_SMC_RETURN_ETHREAD_LIMIT. ++ * ++ * Call requests usage: ++ * a0 SMC Function ID, OPTEE_SMC_GET_THREAD_COUNT ++ * a1-6 Not used ++ * a7 Hypervisor Client ID register ++ * ++ * Normal return register usage: ++ * a0 OPTEE_SMC_RETURN_OK ++ * a1 Number of threads ++ * a2-7 Preserved ++ * ++ * Error return: ++ * a0 OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Requested call is not implemented ++ * a1-7 Preserved ++ */ ++#define OPTEE_SMC_FUNCID_GET_THREAD_COUNT 15 ++#define OPTEE_SMC_GET_THREAD_COUNT \ ++ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_THREAD_COUNT) ++ + /* + * Resume from RPC (for example after processing a foreign interrupt) + * +@@ -341,16 +367,16 @@ struct optee_smc_disable_shm_cache_result { + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. +- * a1 Upper 32bits of 64bit physical pointer to allocated ++ * a1 Upper 32 bits of 64-bit physical pointer to allocated + * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't + * be allocated. 
+- * a2 Lower 32bits of 64bit physical pointer to allocated ++ * a2 Lower 32 bits of 64-bit physical pointer to allocated + * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't + * be allocated + * a3 Preserved +- * a4 Upper 32bits of 64bit Shared memory cookie used when freeing ++ * a4 Upper 32 bits of 64-bit Shared memory cookie used when freeing + * the memory or doing an RPC +- * a5 Lower 32bits of 64bit Shared memory cookie used when freeing ++ * a5 Lower 32 bits of 64-bit Shared memory cookie used when freeing + * the memory or doing an RPC + * a6-7 Preserved + */ +@@ -363,9 +389,9 @@ struct optee_smc_disable_shm_cache_result { + * + * "Call" register usage: + * a0 This value, OPTEE_SMC_RETURN_RPC_FREE +- * a1 Upper 32bits of 64bit shared memory cookie belonging to this ++ * a1 Upper 32 bits of 64-bit shared memory cookie belonging to this + * argument memory +- * a2 Lower 32bits of 64bit shared memory cookie belonging to this ++ * a2 Lower 32 bits of 64-bit shared memory cookie belonging to this + * argument memory + * a3-7 Resume information, must be preserved + * +@@ -379,7 +405,7 @@ struct optee_smc_disable_shm_cache_result { + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE) + + /* +- * Deliver foreign interrupt to normal world. ++ * Deliver a foreign interrupt in normal world. + * + * "Call" register usage: + * a0 OPTEE_SMC_RETURN_RPC_FOREIGN_INTR +@@ -389,7 +415,7 @@ struct optee_smc_disable_shm_cache_result { + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. 
+ * a1-7 Preserved + */ +-#define OPTEE_SMC_RPC_FUNC_FOREIGN_INTR 4 ++#define OPTEE_SMC_RPC_FUNC_FOREIGN_INTR 4 + #define OPTEE_SMC_RETURN_RPC_FOREIGN_INTR \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FOREIGN_INTR) + +@@ -405,10 +431,10 @@ struct optee_smc_disable_shm_cache_result { + * + * "Call" register usage: + * a0 OPTEE_SMC_RETURN_RPC_CMD +- * a1 Upper 32bit of a 64bit Shared memory cookie holding a ++ * a1 Upper 32 bits of a 64-bit Shared memory cookie holding a + * struct optee_msg_arg, must be preserved, only the data should + * be updated +- * a2 Lower 32bit of a 64bit Shared memory cookie holding a ++ * a2 Lower 32 bits of a 64-bit Shared memory cookie holding a + * struct optee_msg_arg, must be preserved, only the data should + * be updated + * a3-7 Resume information, must be preserved +diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c +index 6cbb3643c6c4..1849180b0278 100644 +--- a/drivers/tee/optee/rpc.c ++++ b/drivers/tee/optee/rpc.c +@@ -12,6 +12,7 @@ + #include <linux/tee_drv.h> + #include "optee_private.h" + #include "optee_smc.h" ++#include "optee_rpc_cmd.h" + + struct wq_entry { + struct list_head link; +@@ -90,7 +91,7 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx, + if (!adapter) + goto bad; + +- if (params[1].u.value.a & OPTEE_MSG_RPC_CMD_I2C_FLAGS_TEN_BIT) { ++ if (params[1].u.value.a & OPTEE_RPC_I2C_FLAGS_TEN_BIT) { + if (!i2c_check_functionality(adapter, + I2C_FUNC_10BIT_ADDR)) { + i2c_put_adapter(adapter); +@@ -105,10 +106,10 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx, + msg.len = params[2].u.memref.size; + + switch (params[0].u.value.a) { +- case OPTEE_MSG_RPC_CMD_I2C_TRANSFER_RD: ++ case OPTEE_RPC_I2C_TRANSFER_RD: + msg.flags |= I2C_M_RD; + break; +- case OPTEE_MSG_RPC_CMD_I2C_TRANSFER_WR: ++ case OPTEE_RPC_I2C_TRANSFER_WR: + break; + default: + i2c_put_adapter(adapter); +@@ -195,10 +196,10 @@ static void handle_rpc_func_cmd_wq(struct optee *optee, + goto bad; + + switch 
(arg->params[0].u.value.a) { +- case OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP: ++ case OPTEE_RPC_WAIT_QUEUE_SLEEP: + wq_sleep(&optee->wait_queue, arg->params[0].u.value.b); + break; +- case OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP: ++ case OPTEE_RPC_WAIT_QUEUE_WAKEUP: + wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b); + break; + default: +@@ -268,11 +269,11 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz) + struct tee_shm *shm; + + param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; +- param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL; ++ param.u.value.a = OPTEE_RPC_SHM_TYPE_APPL; + param.u.value.b = sz; + param.u.value.c = 0; + +- ret = optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, ¶m); ++ ret = optee_supp_thrd_req(ctx, OPTEE_RPC_CMD_SHM_ALLOC, 1, ¶m); + if (ret) + return ERR_PTR(-ENOMEM); + +@@ -309,10 +310,10 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, + + sz = arg->params[0].u.value.b; + switch (arg->params[0].u.value.a) { +- case OPTEE_MSG_RPC_SHM_TYPE_APPL: ++ case OPTEE_RPC_SHM_TYPE_APPL: + shm = cmd_alloc_suppl(ctx, sz); + break; +- case OPTEE_MSG_RPC_SHM_TYPE_KERNEL: ++ case OPTEE_RPC_SHM_TYPE_KERNEL: + shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED); + break; + default: +@@ -384,7 +385,7 @@ static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm) + struct tee_param param; + + param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; +- param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL; ++ param.u.value.a = OPTEE_RPC_SHM_TYPE_APPL; + param.u.value.b = tee_shm_get_id(shm); + param.u.value.c = 0; + +@@ -401,7 +402,7 @@ static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm) + */ + tee_shm_put(shm); + +- optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_FREE, 1, ¶m); ++ optee_supp_thrd_req(ctx, OPTEE_RPC_CMD_SHM_FREE, 1, ¶m); + } + + static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx, +@@ -419,10 +420,10 @@ static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx, + + 
shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b; + switch (arg->params[0].u.value.a) { +- case OPTEE_MSG_RPC_SHM_TYPE_APPL: ++ case OPTEE_RPC_SHM_TYPE_APPL: + cmd_free_suppl(ctx, shm); + break; +- case OPTEE_MSG_RPC_SHM_TYPE_KERNEL: ++ case OPTEE_RPC_SHM_TYPE_KERNEL: + tee_shm_free(shm); + break; + default: +@@ -459,23 +460,23 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, + } + + switch (arg->cmd) { +- case OPTEE_MSG_RPC_CMD_GET_TIME: ++ case OPTEE_RPC_CMD_GET_TIME: + handle_rpc_func_cmd_get_time(arg); + break; +- case OPTEE_MSG_RPC_CMD_WAIT_QUEUE: ++ case OPTEE_RPC_CMD_WAIT_QUEUE: + handle_rpc_func_cmd_wq(optee, arg); + break; +- case OPTEE_MSG_RPC_CMD_SUSPEND: ++ case OPTEE_RPC_CMD_SUSPEND: + handle_rpc_func_cmd_wait(arg); + break; +- case OPTEE_MSG_RPC_CMD_SHM_ALLOC: ++ case OPTEE_RPC_CMD_SHM_ALLOC: + free_pages_list(call_ctx); + handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx); + break; +- case OPTEE_MSG_RPC_CMD_SHM_FREE: ++ case OPTEE_RPC_CMD_SHM_FREE: + handle_rpc_func_cmd_shm_free(ctx, arg); + break; +- case OPTEE_MSG_RPC_CMD_I2C_TRANSFER: ++ case OPTEE_RPC_CMD_I2C_TRANSFER: + handle_rpc_func_cmd_i2c_transfer(ctx, arg); + break; + default: +-- +2.17.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0012-optee-refactor-driver-with-internal-callbacks.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0012-optee-refactor-driver-with-internal-callbacks.patch new file mode 100644 index 0000000000..083843d1b1 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0012-optee-refactor-driver-with-internal-callbacks.patch @@ -0,0 +1,721 @@ +From abda5d14075802b84fe9e38f77bfdc371606172c Mon Sep 17 00:00:00 2001 +From: Jens Wiklander <jens.wiklander@linaro.org> +Date: Thu, 25 Mar 2021 15:08:50 +0100 +Subject: [PATCH 17/22] optee: refactor driver with internal callbacks + +The OP-TEE driver is refactored with three internal 
callbacks replacing +direct calls to optee_from_msg_param(), optee_to_msg_param() and +optee_do_call_with_arg(). + +These functions a central to communicating with OP-TEE in secure world +by using the SMC Calling Convention directly. + +This refactoring makes room for using other primitives to communicate +with OP-TEE in secure world while being able to reuse as much as +possible from the present driver. + +Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org> + +Upstream-Status: Pending [Not submitted to upstream yet] +--- + drivers/tee/optee/call.c | 86 +++++++++-------- + drivers/tee/optee/core.c | 148 ++++++++++++++++++++---------- + drivers/tee/optee/optee_private.h | 35 +++++-- + drivers/tee/optee/rpc.c | 19 ++-- + 4 files changed, 182 insertions(+), 106 deletions(-) + +diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c +index 1b339b743ff5..e7b93153252c 100644 +--- a/drivers/tee/optee/call.c ++++ b/drivers/tee/optee/call.c +@@ -1,6 +1,6 @@ + // SPDX-License-Identifier: GPL-2.0-only + /* +- * Copyright (c) 2015, Linaro Limited ++ * Copyright (c) 2015-2021, Linaro Limited + */ + #include <linux/arm-smccc.h> + #include <linux/device.h> +@@ -116,20 +116,25 @@ static struct optee_session *find_session(struct optee_context_data *ctxdata, + /** + * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world + * @ctx: calling context +- * @parg: physical address of message to pass to secure world ++ * @arg: shared memory holding the message to pass to secure world + * + * Does and SMC to OP-TEE in secure world and handles eventual resulting + * Remote Procedure Calls (RPC) from OP-TEE. 
+ * + * Returns return code from secure world, 0 is OK + */ +-u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg) ++int optee_do_call_with_arg(struct tee_context *ctx, struct tee_shm *arg) + { + struct optee *optee = tee_get_drvdata(ctx->teedev); + struct optee_call_waiter w; + struct optee_rpc_param param = { }; + struct optee_call_ctx call_ctx = { }; +- u32 ret; ++ phys_addr_t parg; ++ int rc; ++ ++ rc = tee_shm_get_pa(arg, 0, &parg); ++ if (rc) ++ return rc; + + param.a0 = OPTEE_SMC_CALL_WITH_ARG; + reg_pair_from_64(¶m.a1, ¶m.a2, parg); +@@ -157,7 +162,7 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg) + param.a3 = res.a3; + optee_handle_rpc(ctx, ¶m, &call_ctx); + } else { +- ret = res.a0; ++ rc = res.a0; + break; + } + } +@@ -169,14 +174,12 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg) + */ + optee_cq_wait_final(&optee->call_queue, &w); + +- return ret; ++ return rc; + } + + static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params, +- struct optee_msg_arg **msg_arg, +- phys_addr_t *msg_parg) ++ struct optee_msg_arg **msg_arg) + { +- int rc; + struct tee_shm *shm; + struct optee_msg_arg *ma; + +@@ -187,22 +190,13 @@ static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params, + + ma = tee_shm_get_va(shm, 0); + if (IS_ERR(ma)) { +- rc = PTR_ERR(ma); +- goto out; ++ tee_shm_free(shm); ++ return (void *)ma; + } + +- rc = tee_shm_get_pa(shm, 0, msg_parg); +- if (rc) +- goto out; +- + memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params)); + ma->num_params = num_params; + *msg_arg = ma; +-out: +- if (rc) { +- tee_shm_free(shm); +- return ERR_PTR(rc); +- } + + return shm; + } +@@ -211,16 +205,16 @@ int optee_open_session(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param) + { ++ struct optee *optee = tee_get_drvdata(ctx->teedev); + struct optee_context_data *ctxdata = ctx->data; + int rc; + struct tee_shm *shm; + struct 
optee_msg_arg *msg_arg; +- phys_addr_t msg_parg; + struct optee_session *sess = NULL; + uuid_t client_uuid; + + /* +2 for the meta parameters added below */ +- shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg); ++ shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + +@@ -244,7 +238,8 @@ int optee_open_session(struct tee_context *ctx, + goto out; + export_uuid(msg_arg->params[1].u.octets, &client_uuid); + +- rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param); ++ rc = optee->ops->to_msg_param(optee, msg_arg->params + 2, ++ arg->num_params, param); + if (rc) + goto out; + +@@ -254,7 +249,7 @@ int optee_open_session(struct tee_context *ctx, + goto out; + } + +- if (optee_do_call_with_arg(ctx, msg_parg)) { ++ if (optee->ops->do_call_with_arg(ctx, shm)) { + msg_arg->ret = TEEC_ERROR_COMMUNICATION; + msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + } +@@ -269,7 +264,8 @@ int optee_open_session(struct tee_context *ctx, + kfree(sess); + } + +- if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) { ++ if (optee->ops->from_msg_param(optee, param, arg->num_params, ++ msg_arg->params + 2)) { + arg->ret = TEEC_ERROR_COMMUNICATION; + arg->ret_origin = TEEC_ORIGIN_COMMS; + /* Close session again to avoid leakage */ +@@ -288,16 +284,16 @@ int optee_open_session(struct tee_context *ctx, + int optee_close_session_helper(struct tee_context *ctx, u32 session) + { + struct tee_shm *shm; ++ struct optee *optee = tee_get_drvdata(ctx->teedev); + struct optee_msg_arg *msg_arg; +- phys_addr_t msg_parg; + +- shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg); ++ shm = get_msg_arg(ctx, 0, &msg_arg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION; + msg_arg->session = session; +- optee_do_call_with_arg(ctx, msg_parg); ++ optee->ops->do_call_with_arg(ctx, shm); + + tee_shm_free(shm); + +@@ -325,10 +321,10 @@ int optee_close_session(struct tee_context *ctx, u32 session) 
+ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, + struct tee_param *param) + { ++ struct optee *optee = tee_get_drvdata(ctx->teedev); + struct optee_context_data *ctxdata = ctx->data; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; +- phys_addr_t msg_parg; + struct optee_session *sess; + int rc; + +@@ -339,7 +335,7 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, + if (!sess) + return -EINVAL; + +- shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg); ++ shm = get_msg_arg(ctx, arg->num_params, &msg_arg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND; +@@ -347,16 +343,18 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, + msg_arg->session = arg->session; + msg_arg->cancel_id = arg->cancel_id; + +- rc = optee_to_msg_param(msg_arg->params, arg->num_params, param); ++ rc = optee->ops->to_msg_param(optee, msg_arg->params, arg->num_params, ++ param); + if (rc) + goto out; + +- if (optee_do_call_with_arg(ctx, msg_parg)) { ++ if (optee->ops->do_call_with_arg(ctx, shm)) { + msg_arg->ret = TEEC_ERROR_COMMUNICATION; + msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + } + +- if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) { ++ if (optee->ops->from_msg_param(optee, param, arg->num_params, ++ msg_arg->params)) { + msg_arg->ret = TEEC_ERROR_COMMUNICATION; + msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + } +@@ -370,10 +368,10 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, + + int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session) + { ++ struct optee *optee = tee_get_drvdata(ctx->teedev); + struct optee_context_data *ctxdata = ctx->data; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; +- phys_addr_t msg_parg; + struct optee_session *sess; + + /* Check that the session is valid */ +@@ -383,14 +381,14 @@ int optee_cancel_req(struct tee_context *ctx, u32 
cancel_id, u32 session) + if (!sess) + return -EINVAL; + +- shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg); ++ shm = get_msg_arg(ctx, 0, &msg_arg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + msg_arg->cmd = OPTEE_MSG_CMD_CANCEL; + msg_arg->session = session; + msg_arg->cancel_id = cancel_id; +- optee_do_call_with_arg(ctx, msg_parg); ++ optee->ops->do_call_with_arg(ctx, shm); + + tee_shm_free(shm); + return 0; +@@ -589,10 +587,10 @@ int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm, + struct page **pages, size_t num_pages, + unsigned long start) + { +- struct tee_shm *shm_arg = NULL; ++ struct optee *optee = tee_get_drvdata(ctx->teedev); + struct optee_msg_arg *msg_arg; ++ struct tee_shm *shm_arg; + u64 *pages_list; +- phys_addr_t msg_parg; + int rc; + + if (!num_pages) +@@ -606,7 +604,7 @@ int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm, + if (!pages_list) + return -ENOMEM; + +- shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg); ++ shm_arg = get_msg_arg(ctx, 1, &msg_arg); + if (IS_ERR(shm_arg)) { + rc = PTR_ERR(shm_arg); + goto out; +@@ -627,7 +625,7 @@ int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm, + msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) | + (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1)); + +- if (optee_do_call_with_arg(ctx, msg_parg) || ++ if (optee->ops->do_call_with_arg(ctx, shm) || + msg_arg->ret != TEEC_SUCCESS) + rc = -EINVAL; + +@@ -639,12 +637,12 @@ int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm, + + int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm) + { +- struct tee_shm *shm_arg; ++ struct optee *optee = tee_get_drvdata(ctx->teedev); + struct optee_msg_arg *msg_arg; +- phys_addr_t msg_parg; ++ struct tee_shm *shm_arg; + int rc = 0; + +- shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg); ++ shm_arg = get_msg_arg(ctx, 1, &msg_arg); + if (IS_ERR(shm_arg)) + return PTR_ERR(shm_arg); + +@@ -653,7 +651,7 @@ int 
optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm) + msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT; + msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm; + +- if (optee_do_call_with_arg(ctx, msg_parg) || ++ if (optee->ops->do_call_with_arg(ctx, shm) || + msg_arg->ret != TEEC_SUCCESS) + rc = -EINVAL; + tee_shm_free(shm_arg); +diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c +index e39c6d290d83..ab602bb8e14a 100644 +--- a/drivers/tee/optee/core.c ++++ b/drivers/tee/optee/core.c +@@ -1,6 +1,6 @@ + // SPDX-License-Identifier: GPL-2.0-only + /* +- * Copyright (c) 2015, Linaro Limited ++ * Copyright (c) 2015-2021, Linaro Limited + */ + + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +@@ -26,21 +26,87 @@ + + #define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES + ++static void from_msg_param_value(struct tee_param *p, u32 attr, ++ const struct optee_msg_param *mp) ++{ ++ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT + ++ attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT; ++ p->u.value.a = mp->u.value.a; ++ p->u.value.b = mp->u.value.b; ++ p->u.value.c = mp->u.value.c; ++} ++ ++static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr, ++ const struct optee_msg_param *mp) ++{ ++ struct tee_shm *shm; ++ phys_addr_t pa; ++ int rc; ++ ++ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + ++ attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT; ++ p->u.memref.size = mp->u.tmem.size; ++ shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref; ++ if (!shm) { ++ p->u.memref.shm_offs = 0; ++ p->u.memref.shm = NULL; ++ return 0; ++ } ++ ++ rc = tee_shm_get_pa(shm, 0, &pa); ++ if (rc) ++ return rc; ++ ++ p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa; ++ p->u.memref.shm = shm; ++ ++ /* Check that the memref is covered by the shm object */ ++ if (p->u.memref.size) { ++ size_t o = p->u.memref.shm_offs + ++ p->u.memref.size - 1; ++ ++ rc = tee_shm_get_pa(shm, o, NULL); ++ if (rc) ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static void 
from_msg_param_reg_mem(struct tee_param *p, u32 attr, ++ const struct optee_msg_param *mp) ++{ ++ struct tee_shm *shm; ++ ++ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + ++ attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT; ++ p->u.memref.size = mp->u.rmem.size; ++ shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref; ++ ++ if (shm) { ++ p->u.memref.shm_offs = mp->u.rmem.offs; ++ p->u.memref.shm = shm; ++ } else { ++ p->u.memref.shm_offs = 0; ++ p->u.memref.shm = NULL; ++ } ++} ++ + /** + * optee_from_msg_param() - convert from OPTEE_MSG parameters to + * struct tee_param ++ * @optee: main service struct + * @params: subsystem internal parameter representation + * @num_params: number of elements in the parameter arrays + * @msg_params: OPTEE_MSG parameters + * Returns 0 on success or <0 on failure + */ +-int optee_from_msg_param(struct tee_param *params, size_t num_params, +- const struct optee_msg_param *msg_params) ++static int optee_from_msg_param(struct optee *optee, struct tee_param *params, ++ size_t num_params, ++ const struct optee_msg_param *msg_params) + { + int rc; + size_t n; +- struct tee_shm *shm; +- phys_addr_t pa; + + for (n = 0; n < num_params; n++) { + struct tee_param *p = params + n; +@@ -55,48 +121,19 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params, + case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT: + case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT: + case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT: +- p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT + +- attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT; +- p->u.value.a = mp->u.value.a; +- p->u.value.b = mp->u.value.b; +- p->u.value.c = mp->u.value.c; ++ from_msg_param_value(p, attr, mp); + break; + case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT: + case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT: + case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT: +- p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + +- attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT; +- p->u.memref.size = mp->u.tmem.size; +- shm = (struct tee_shm *)(unsigned long) +- mp->u.tmem.shm_ref; +- 
if (!shm) { +- p->u.memref.shm_offs = 0; +- p->u.memref.shm = NULL; +- break; +- } +- rc = tee_shm_get_pa(shm, 0, &pa); ++ rc = from_msg_param_tmp_mem(p, attr, mp); + if (rc) + return rc; +- p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa; +- p->u.memref.shm = shm; + break; + case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT: + case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT: + case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT: +- p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + +- attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT; +- p->u.memref.size = mp->u.rmem.size; +- shm = (struct tee_shm *)(unsigned long) +- mp->u.rmem.shm_ref; +- +- if (!shm) { +- p->u.memref.shm_offs = 0; +- p->u.memref.shm = NULL; +- break; +- } +- p->u.memref.shm_offs = mp->u.rmem.offs; +- p->u.memref.shm = shm; +- ++ from_msg_param_reg_mem(p, attr, mp); + break; + + default: +@@ -106,6 +143,16 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params, + return 0; + } + ++static void to_msg_param_value(struct optee_msg_param *mp, ++ const struct tee_param *p) ++{ ++ mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr - ++ TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; ++ mp->u.value.a = p->u.value.a; ++ mp->u.value.b = p->u.value.b; ++ mp->u.value.c = p->u.value.c; ++} ++ + static int to_msg_param_tmp_mem(struct optee_msg_param *mp, + const struct tee_param *p) + { +@@ -148,13 +195,15 @@ static int to_msg_param_reg_mem(struct optee_msg_param *mp, + + /** + * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters ++ * @optee: main service struct + * @msg_params: OPTEE_MSG parameters + * @num_params: number of elements in the parameter arrays + * @params: subsystem itnernal parameter representation + * Returns 0 on success or <0 on failure + */ +-int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params, +- const struct tee_param *params) ++static int optee_to_msg_param(struct optee *optee, ++ struct optee_msg_param *msg_params, ++ size_t num_params, const struct tee_param *params) + { + 
int rc; + size_t n; +@@ -171,11 +220,7 @@ int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params, + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: +- mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr - +- TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; +- mp->u.value.a = p->u.value.a; +- mp->u.value.b = p->u.value.b; +- mp->u.value.c = p->u.value.c; ++ to_msg_param_value(mp, p); + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: +@@ -301,7 +346,7 @@ static void optee_release_supp(struct tee_context *ctx) + optee_supp_release(&optee->supp); + } + +-static const struct tee_driver_ops optee_ops = { ++static const struct tee_driver_ops optee_clnt_ops = { + .get_version = optee_get_version, + .open = optee_open, + .release = optee_release, +@@ -313,9 +358,9 @@ static const struct tee_driver_ops optee_ops = { + .shm_unregister = optee_shm_unregister, + }; + +-static const struct tee_desc optee_desc = { ++static const struct tee_desc optee_clnt_desc = { + .name = DRIVER_NAME "-clnt", +- .ops = &optee_ops, ++ .ops = &optee_clnt_ops, + .owner = THIS_MODULE, + }; + +@@ -336,6 +381,12 @@ static const struct tee_desc optee_supp_desc = { + .flags = TEE_DESC_PRIVILEGED, + }; + ++static const struct optee_ops optee_ops = { ++ .do_call_with_arg = optee_do_call_with_arg, ++ .to_msg_param = optee_to_msg_param, ++ .from_msg_param = optee_from_msg_param, ++}; ++ + static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn) + { + struct arm_smccc_res res; +@@ -637,10 +688,11 @@ static int optee_probe(struct platform_device *pdev) + goto err; + } + ++ optee->ops = &optee_ops; + optee->invoke_fn = invoke_fn; + optee->sec_caps = sec_caps; + +- teedev = tee_device_alloc(&optee_desc, NULL, pool, optee); ++ teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee); + if (IS_ERR(teedev)) { + rc = PTR_ERR(teedev); + goto 
err; +diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h +index 2b63b796645e..c5741e96e967 100644 +--- a/drivers/tee/optee/optee_private.h ++++ b/drivers/tee/optee/optee_private.h +@@ -1,6 +1,6 @@ + /* SPDX-License-Identifier: GPL-2.0-only */ + /* +- * Copyright (c) 2015, Linaro Limited ++ * Copyright (c) 2015-2021, Linaro Limited + */ + + #ifndef OPTEE_PRIVATE_H +@@ -66,9 +66,34 @@ struct optee_supp { + struct completion reqs_c; + }; + ++struct optee; ++ ++/** ++ * struct optee_ops - OP-TEE driver internal operations ++ * @do_call_with_arg: enters OP-TEE in secure world ++ * @to_msg_param: converts from struct tee_param to OPTEE_MSG parameters ++ * @from_msg_param: converts from OPTEE_MSG parameters to struct tee_param ++ * ++ * These OPs are only supposed to be used internally in the OP-TEE driver ++ * as a way of abstracting the different methogs of entering OP-TEE in ++ * secure world. ++ */ ++struct optee_ops { ++ int (*do_call_with_arg)(struct tee_context *ctx, ++ struct tee_shm *shm_arg); ++ int (*to_msg_param)(struct optee *optee, ++ struct optee_msg_param *msg_params, ++ size_t num_params, const struct tee_param *params); ++ int (*from_msg_param)(struct optee *optee, struct tee_param *params, ++ size_t num_params, ++ const struct optee_msg_param *msg_params); ++}; ++ + /** + * struct optee - main service struct + * @supp_teedev: supplicant device ++ * @ops: internal callbacks for different ways to reach secure ++ * world + * @teedev: client device + * @invoke_fn: function to issue smc or hvc + * @call_queue: queue of threads waiting to call @invoke_fn +@@ -86,6 +111,7 @@ struct optee_supp { + struct optee { + struct tee_device *supp_teedev; + struct tee_device *teedev; ++ const struct optee_ops *ops; + optee_invoke_fn *invoke_fn; + struct optee_call_queue call_queue; + struct optee_wait_queue wait_queue; +@@ -148,7 +174,7 @@ int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params, + int optee_supp_send(struct 
tee_context *ctx, u32 ret, u32 num_params, + struct tee_param *param); + +-u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg); ++int optee_do_call_with_arg(struct tee_context *ctx, struct tee_shm *arg); + int optee_open_session(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param); +@@ -171,11 +197,6 @@ int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm, + unsigned long start); + int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm); + +-int optee_from_msg_param(struct tee_param *params, size_t num_params, +- const struct optee_msg_param *msg_params); +-int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params, +- const struct tee_param *params); +- + u64 *optee_allocate_pages_list(size_t num_entries); + void optee_free_pages_list(void *array, size_t num_entries); + void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages, +diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c +index 1849180b0278..39562fb6841e 100644 +--- a/drivers/tee/optee/rpc.c ++++ b/drivers/tee/optee/rpc.c +@@ -1,6 +1,6 @@ + // SPDX-License-Identifier: GPL-2.0-only + /* +- * Copyright (c) 2015-2016, Linaro Limited ++ * Copyright (c) 2015-2021, Linaro Limited + */ + + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +@@ -55,6 +55,7 @@ static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg) + static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx, + struct optee_msg_arg *arg) + { ++ struct optee *optee = tee_get_drvdata(ctx->teedev); + struct tee_param *params; + struct i2c_adapter *adapter; + struct i2c_msg msg = { }; +@@ -79,7 +80,8 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx, + return; + } + +- if (optee_from_msg_param(params, arg->num_params, arg->params)) ++ if (optee->ops->from_msg_param(optee, params, arg->num_params, ++ arg->params)) + goto bad; + + for (i = 0; i < arg->num_params; i++) { +@@ 
-122,7 +124,8 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx, + arg->ret = TEEC_ERROR_COMMUNICATION; + } else { + params[3].u.value.a = msg.len; +- if (optee_to_msg_param(arg->params, arg->num_params, params)) ++ if (optee->ops->to_msg_param(optee, arg->params, ++ arg->num_params, params)) + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + else + arg->ret = TEEC_SUCCESS; +@@ -234,7 +237,7 @@ static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg) + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + } + +-static void handle_rpc_supp_cmd(struct tee_context *ctx, ++static void handle_rpc_supp_cmd(struct tee_context *ctx, struct optee *optee, + struct optee_msg_arg *arg) + { + struct tee_param *params; +@@ -248,14 +251,16 @@ static void handle_rpc_supp_cmd(struct tee_context *ctx, + return; + } + +- if (optee_from_msg_param(params, arg->num_params, arg->params)) { ++ if (optee->ops->from_msg_param(optee, params, arg->num_params, ++ arg->params)) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + goto out; + } + + arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params); + +- if (optee_to_msg_param(arg->params, arg->num_params, params)) ++ if (optee->ops->to_msg_param(optee, arg->params, arg->num_params, ++ params)) + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + out: + kfree(params); +@@ -480,7 +485,7 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, + handle_rpc_func_cmd_i2c_transfer(ctx, arg); + break; + default: +- handle_rpc_supp_cmd(ctx, arg); ++ handle_rpc_supp_cmd(ctx, optee, arg); + } + } + +-- +2.17.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0013-optee-add-a-FF-A-memory-pool.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0013-optee-add-a-FF-A-memory-pool.patch new file mode 100644 index 0000000000..6be15814a0 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0013-optee-add-a-FF-A-memory-pool.patch @@ -0,0 +1,131 
@@ +From eafffa586795e3cb485310fbd287322c9c7dc3bb Mon Sep 17 00:00:00 2001 +From: Jens Wiklander <jens.wiklander@linaro.org> +Date: Thu, 25 Mar 2021 15:08:52 +0100 +Subject: [PATCH 18/22] optee: add a FF-A memory pool + +Adds a memory pool to be used when the driver uses FF-A [1] as transport +layer. + +[1] https://developer.arm.com/documentation/den0077/latest +Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org> + +Upstream-Status: Pending [Not submitted to upstream yet] +--- + drivers/tee/optee/shm_pool.c | 65 +++++++++++++++++++++++++++++++++--- + drivers/tee/optee/shm_pool.h | 1 + + 2 files changed, 61 insertions(+), 5 deletions(-) + +diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c +index d767eebf30bd..d2116cb39c8b 100644 +--- a/drivers/tee/optee/shm_pool.c ++++ b/drivers/tee/optee/shm_pool.c +@@ -12,8 +12,14 @@ + #include "optee_smc.h" + #include "shm_pool.h" + +-static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, +- struct tee_shm *shm, size_t size) ++static int ++pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm, ++ struct tee_shm *shm, size_t size, ++ int (*shm_register)(struct tee_context *ctx, ++ struct tee_shm *shm, ++ struct page **pages, ++ size_t num_pages, ++ unsigned long start)) + { + unsigned int order = get_order(size); + struct page *page; +@@ -27,7 +33,7 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, + shm->paddr = page_to_phys(page); + shm->size = PAGE_SIZE << order; + +- if (shm->flags & TEE_SHM_DMA_BUF) { ++ if (shm_register) { + unsigned int nr_pages = 1 << order, i; + struct page **pages; + +@@ -41,14 +47,23 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, + } + + shm->flags |= TEE_SHM_REGISTER; +- rc = optee_shm_register(shm->ctx, shm, pages, nr_pages, +- (unsigned long)shm->kaddr); ++ rc = shm_register(shm->ctx, shm, pages, nr_pages, ++ (unsigned long)shm->kaddr); + kfree(pages); + } + + return rc; + } + ++static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, ++ struct 
tee_shm *shm, size_t size) ++{ ++ if (!(shm->flags & TEE_SHM_DMA_BUF)) ++ return pool_op_alloc_helper(poolm, shm, size, NULL); ++ ++ return pool_op_alloc_helper(poolm, shm, size, optee_shm_register); ++} ++ + static void pool_op_free(struct tee_shm_pool_mgr *poolm, + struct tee_shm *shm) + { +@@ -87,3 +102,43 @@ struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void) + + return mgr; + } ++ ++#ifdef CONFIG_ARM_FFA_TRANSPORT ++static int pool_ffa_op_alloc(struct tee_shm_pool_mgr *poolm, ++ struct tee_shm *shm, size_t size) ++{ ++ return pool_op_alloc_helper(poolm, shm, size, optee_ffa_shm_register); ++} ++ ++static void pool_ffa_op_free(struct tee_shm_pool_mgr *poolm, ++ struct tee_shm *shm) ++{ ++ optee_ffa_shm_unregister(shm->ctx, shm); ++ free_pages((unsigned long)shm->kaddr, get_order(shm->size)); ++ shm->kaddr = NULL; ++} ++ ++static const struct tee_shm_pool_mgr_ops pool_ffa_ops = { ++ .alloc = pool_ffa_op_alloc, ++ .free = pool_ffa_op_free, ++ .destroy_poolmgr = pool_op_destroy_poolmgr, ++}; ++ ++/** ++ * optee_ffa_shm_pool_alloc_pages() - create page-based allocator pool ++ * ++ * This pool is used with OP-TEE over FF-A. In this case command buffers ++ * and such are allocated from kernel's own memory. 
++ */ ++struct tee_shm_pool_mgr *optee_ffa_shm_pool_alloc_pages(void) ++{ ++ struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); ++ ++ if (!mgr) ++ return ERR_PTR(-ENOMEM); ++ ++ mgr->ops = &pool_ffa_ops; ++ ++ return mgr; ++} ++#endif /*CONFIG_ARM_FFA_TRANSPORT*/ +diff --git a/drivers/tee/optee/shm_pool.h b/drivers/tee/optee/shm_pool.h +index 28109d991c4b..34c5fd74a3ff 100644 +--- a/drivers/tee/optee/shm_pool.h ++++ b/drivers/tee/optee/shm_pool.h +@@ -10,5 +10,6 @@ + #include <linux/tee_drv.h> + + struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void); ++struct tee_shm_pool_mgr *optee_ffa_shm_pool_alloc_pages(void); + + #endif +-- +2.17.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0014-optee-add-FF-A-support.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0014-optee-add-FF-A-support.patch new file mode 100644 index 0000000000..faf6cd0220 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0014-optee-add-FF-A-support.patch @@ -0,0 +1,1270 @@ +From 5665ffd3fb8e94003abc1c0c05c9fa30d4028b67 Mon Sep 17 00:00:00 2001 +From: Jens Wiklander <jens.wiklander@linaro.org> +Date: Thu, 25 Mar 2021 15:08:53 +0100 +Subject: [PATCH 19/22] optee: add FF-A support + +Adds support for using FF-A [1] as transport to the OP-TEE driver. + +Introduces struct optee_msg_param_fmem which carries all information +needed when OP-TEE is calling FFA_MEM_RETRIEVE_REQ to get the shared +memory reference mapped by the hypervisor in S-EL2. Register usage is +also updated to include the information needed. + +The FF-A part of this driver is enabled if CONFIG_ARM_FFA_TRANSPORT is +enabled. 
+ +[1] https://developer.arm.com/documentation/den0077/latest +Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org> + +Upstream-Status: Pending [Not submitted to upstream yet] +--- + drivers/tee/optee/call.c | 212 ++++++++++++- + drivers/tee/optee/core.c | 486 +++++++++++++++++++++++++++++- + drivers/tee/optee/optee_ffa.h | 153 ++++++++++ + drivers/tee/optee/optee_msg.h | 27 +- + drivers/tee/optee/optee_private.h | 52 ++++ + drivers/tee/optee/rpc.c | 118 ++++++++ + 6 files changed, 1040 insertions(+), 8 deletions(-) + create mode 100644 drivers/tee/optee/optee_ffa.h + +diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c +index e7b93153252c..cf91a81a242a 100644 +--- a/drivers/tee/optee/call.c ++++ b/drivers/tee/optee/call.c +@@ -3,15 +3,18 @@ + * Copyright (c) 2015-2021, Linaro Limited + */ + #include <linux/arm-smccc.h> ++#include <linux/arm_ffa.h> + #include <linux/device.h> + #include <linux/err.h> + #include <linux/errno.h> + #include <linux/mm.h> + #include <linux/sched.h> ++#include <linux/scatterlist.h> + #include <linux/slab.h> + #include <linux/tee_drv.h> + #include <linux/types.h> + #include <linux/uaccess.h> ++#include "optee_ffa.h" + #include "optee_private.h" + #include "optee_smc.h" + +@@ -180,11 +183,21 @@ int optee_do_call_with_arg(struct tee_context *ctx, struct tee_shm *arg) + static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params, + struct optee_msg_arg **msg_arg) + { ++ struct optee *optee = tee_get_drvdata(ctx->teedev); ++ size_t sz = OPTEE_MSG_GET_ARG_SIZE(num_params); + struct tee_shm *shm; + struct optee_msg_arg *ma; + +- shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params), +- TEE_SHM_MAPPED); ++ /* ++ * rpc_arg_count is set to the number of allocated parameters in ++ * the RPC argument struct if a second MSG arg struct is expected. ++ * The second arg struct will then be used for RPC. So far only ++ * enabled when using FF-A as transport layer. 
++ */ ++ if (optee->rpc_arg_count) ++ sz += OPTEE_MSG_GET_ARG_SIZE(optee->rpc_arg_count); ++ ++ shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED); + if (IS_ERR(shm)) + return shm; + +@@ -673,3 +686,198 @@ int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm) + { + return 0; + } ++ ++#ifdef CONFIG_ARM_FFA_TRANSPORT ++static int optee_ffa_yielding_call(struct tee_context *ctx, ++ struct ffa_send_direct_data *data, ++ struct optee_msg_arg *rpc_arg) ++{ ++ struct optee *optee = tee_get_drvdata(ctx->teedev); ++ const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops; ++ struct ffa_device *ffa_dev = optee->ffa.ffa_dev; ++ struct optee_call_waiter w; ++ u32 cmd = data->data0; ++ u32 w4 = data->data1; ++ u32 w5 = data->data2; ++ u32 w6 = data->data3; ++ int rc; ++ ++ /* Initialize waiter */ ++ optee_cq_wait_init(&optee->call_queue, &w); ++ while (true) { ++ rc = ffa_ops->sync_send_receive(ffa_dev, data); ++ if (rc) ++ goto done; ++ ++ switch ((int)data->data0) { ++ case TEEC_SUCCESS: ++ break; ++ case TEEC_ERROR_BUSY: ++ if (cmd == OPTEE_FFA_YIELDING_CALL_RESUME) { ++ rc = -EIO; ++ goto done; ++ } ++ ++ /* ++ * Out of threads in secure world, wait for a thread ++ * become available. ++ */ ++ optee_cq_wait_for_completion(&optee->call_queue, &w); ++ data->data0 = cmd; ++ data->data1 = w4; ++ data->data2 = w5; ++ data->data3 = w6; ++ continue; ++ default: ++ rc = -EIO; ++ goto done; ++ } ++ ++ if (data->data1 == OPTEE_FFA_YIELDING_CALL_RETURN_DONE) ++ goto done; ++ ++ /* ++ * OP-TEE has returned with a RPC request. ++ * ++ * Note that data->data4 (passed in register w7) is already ++ * filled in by ffa_ops->sync_send_receive() returning ++ * above. ++ */ ++ cond_resched(); ++ optee_handle_ffa_rpc(ctx, data->data1, rpc_arg); ++ cmd = OPTEE_FFA_YIELDING_CALL_RESUME; ++ data->data0 = cmd; ++ data->data1 = 0; ++ data->data2 = 0; ++ data->data3 = 0; ++ } ++done: ++ /* ++ * We're done with our thread in secure world, if there's any ++ * thread waiters wake up one. 
++ */ ++ optee_cq_wait_final(&optee->call_queue, &w); ++ ++ return rc; ++} ++ ++/** ++ * optee_ffa_do_call_with_arg() - Do a FF-A call to enter OP-TEE in secure world ++ * @ctx: calling context ++ * @shm: shared memory holding the message to pass to secure world ++ * ++ * Does a FF-A call to OP-TEE in secure world and handles eventual resulting ++ * Remote Procedure Calls (RPC) from OP-TEE. ++ * ++ * Returns return code from FF-A, 0 is OK ++ */ ++ ++int optee_ffa_do_call_with_arg(struct tee_context *ctx, struct tee_shm *shm) ++{ ++ struct ffa_send_direct_data data = { ++ .data0 = OPTEE_FFA_YIELDING_CALL_WITH_ARG, ++ .data1 = (u32)shm->sec_world_id, ++ .data2 = (u32)(shm->sec_world_id >> 32), ++ .data3 = shm->offset, ++ }; ++ struct optee_msg_arg *arg = tee_shm_get_va(shm, 0); ++ unsigned int rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params); ++ struct optee_msg_arg *rpc_arg = tee_shm_get_va(shm, rpc_arg_offs); ++ ++ return optee_ffa_yielding_call(ctx, &data, rpc_arg); ++} ++ ++int optee_ffa_shm_register(struct tee_context *ctx, struct tee_shm *shm, ++ struct page **pages, size_t num_pages, ++ unsigned long start) ++{ ++ struct optee *optee = tee_get_drvdata(ctx->teedev); ++ const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops; ++ struct ffa_device *ffa_dev = optee->ffa.ffa_dev; ++ struct ffa_mem_region_attributes mem_attr = { ++ .receiver = ffa_dev->vm_id, ++ .attrs = FFA_MEM_RW, ++ }; ++ struct ffa_mem_ops_args args = { ++ .use_txbuf = true, ++ .attrs = &mem_attr, ++ .nattrs = 1, ++ }; ++ struct sg_table sgt; ++ int rc; ++ ++ rc = check_mem_type(start, num_pages); ++ if (rc) ++ return rc; ++ ++ rc = sg_alloc_table_from_pages(&sgt, pages, num_pages, 0, ++ num_pages * PAGE_SIZE, GFP_KERNEL); ++ if (rc) ++ return rc; ++ args.sg = sgt.sgl; ++ rc = ffa_ops->memory_share(ffa_dev, &args); ++ sg_free_table(&sgt); ++ if (rc) ++ return rc; ++ ++ rc = optee_shm_add_ffa_handle(optee, shm, args.g_handle); ++ if (rc) { ++ ffa_ops->memory_reclaim(args.g_handle, 0); ++ 
return rc; ++ } ++ ++ shm->sec_world_id = args.g_handle; ++ ++ return 0; ++} ++ ++int optee_ffa_shm_unregister(struct tee_context *ctx, struct tee_shm *shm) ++{ ++ struct optee *optee = tee_get_drvdata(ctx->teedev); ++ const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops; ++ struct ffa_device *ffa_dev = optee->ffa.ffa_dev; ++ u64 global_handle = shm->sec_world_id; ++ struct ffa_send_direct_data data = { ++ .data0 = OPTEE_FFA_UNREGISTER_SHM, ++ .data1 = (u32)global_handle, ++ .data2 = (u32)(global_handle >> 32) ++ }; ++ int rc; ++ ++ optee_shm_rem_ffa_handle(optee, global_handle); ++ shm->sec_world_id = 0; ++ ++ rc = ffa_ops->sync_send_receive(ffa_dev, &data); ++ if (rc) ++ pr_err("Unregister SHM id 0x%llx rc %d\n", global_handle, rc); ++ ++ rc = ffa_ops->memory_reclaim(global_handle, 0); ++ if (rc) ++ pr_err("mem_reclain: 0x%llx %d", global_handle, rc); ++ ++ return rc; ++} ++ ++int optee_ffa_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm) ++{ ++ struct optee *optee = tee_get_drvdata(ctx->teedev); ++ const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops; ++ u64 global_handle = shm->sec_world_id; ++ int rc; ++ ++ /* ++ * We're skipping the OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM call ++ * since this is OP-TEE freeing via RPC so it has already retired ++ * this ID. 
++ */ ++ ++ optee_shm_rem_ffa_handle(optee, global_handle); ++ rc = ffa_ops->memory_reclaim(global_handle, 0); ++ if (rc) ++ pr_err("mem_reclain: 0x%llx %d", global_handle, rc); ++ ++ shm->sec_world_id = 0; ++ ++ return rc; ++} ++#endif /*CONFIG_ARM_FFA_TRANSPORT*/ +diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c +index ab602bb8e14a..b9719c60dc48 100644 +--- a/drivers/tee/optee/core.c ++++ b/drivers/tee/optee/core.c +@@ -6,6 +6,7 @@ + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + + #include <linux/arm-smccc.h> ++#include <linux/arm_ffa.h> + #include <linux/errno.h> + #include <linux/io.h> + #include <linux/module.h> +@@ -20,6 +21,7 @@ + #include <linux/workqueue.h> + #include "optee_private.h" + #include "optee_smc.h" ++#include "optee_ffa.h" + #include "shm_pool.h" + + #define DRIVER_NAME "optee" +@@ -299,10 +301,9 @@ static int optee_open(struct tee_context *ctx) + mutex_init(&ctxdata->mutex); + INIT_LIST_HEAD(&ctxdata->sess_list); + +- if (optee->sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL) +- ctx->cap_memref_null = true; +- else +- ctx->cap_memref_null = false; ++ ctx->cap_memref_null = optee_is_ffa_based(optee) || ++ (optee->sec_caps & ++ OPTEE_SMC_SEC_CAP_MEMREF_NULL); + + ctx->data = ctxdata; + return 0; +@@ -567,6 +568,472 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm) + return rc; + } + ++#ifdef CONFIG_ARM_FFA_TRANSPORT ++static void optee_ffa_get_version(struct tee_device *teedev, ++ struct tee_ioctl_version_data *vers) ++{ ++ struct tee_ioctl_version_data v = { ++ .impl_id = TEE_IMPL_ID_OPTEE, ++ .impl_caps = TEE_OPTEE_CAP_TZ, ++ .gen_caps = TEE_GEN_CAP_GP | TEE_GEN_CAP_REG_MEM | ++ TEE_GEN_CAP_MEMREF_NULL, ++ }; ++ ++ *vers = v; ++} ++ ++struct shm_rhash { ++ struct tee_shm *shm; ++ u64 global_id; ++ struct rhash_head linkage; ++}; ++ ++static void rh_free_fn(void *ptr, void *arg) ++{ ++ kfree(ptr); ++} ++ ++static const struct rhashtable_params shm_rhash_params = { ++ .head_offset = offsetof(struct 
shm_rhash, linkage), ++ .key_len = sizeof(u64), ++ .key_offset = offsetof(struct shm_rhash, global_id), ++ .automatic_shrinking = true, ++}; ++ ++struct tee_shm *optee_shm_from_ffa_handle(struct optee *optee, u64 global_id) ++{ ++ struct tee_shm *shm = NULL; ++ struct shm_rhash *r; ++ ++ mutex_lock(&optee->ffa.mutex); ++ r = rhashtable_lookup_fast(&optee->ffa.global_ids, &global_id, ++ shm_rhash_params); ++ if (r) ++ shm = r->shm; ++ mutex_unlock(&optee->ffa.mutex); ++ ++ return shm; ++} ++ ++int optee_shm_add_ffa_handle(struct optee *optee, struct tee_shm *shm, ++ u64 global_id) ++{ ++ struct shm_rhash *r; ++ int rc; ++ ++ r = kmalloc(sizeof(*r), GFP_KERNEL); ++ if (!r) ++ return -ENOMEM; ++ r->shm = shm; ++ r->global_id = global_id; ++ ++ mutex_lock(&optee->ffa.mutex); ++ rc = rhashtable_lookup_insert_fast(&optee->ffa.global_ids, &r->linkage, ++ shm_rhash_params); ++ mutex_unlock(&optee->ffa.mutex); ++ ++ if (rc) ++ kfree(r); ++ ++ return rc; ++} ++ ++int optee_shm_rem_ffa_handle(struct optee *optee, u64 global_id) ++{ ++ struct shm_rhash *r; ++ int rc = -ENOENT; ++ ++ mutex_lock(&optee->ffa.mutex); ++ r = rhashtable_lookup_fast(&optee->ffa.global_ids, &global_id, ++ shm_rhash_params); ++ if (r) ++ rc = rhashtable_remove_fast(&optee->ffa.global_ids, ++ &r->linkage, shm_rhash_params); ++ mutex_unlock(&optee->ffa.mutex); ++ ++ if (!rc) ++ kfree(r); ++ ++ return rc; ++} ++ ++static void from_msg_param_ffa_mem(struct optee *optee, struct tee_param *p, ++ u32 attr, const struct optee_msg_param *mp) ++{ ++ struct tee_shm *shm = NULL; ++ u64 offs_high = 0; ++ u64 offs_low = 0; ++ ++ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + ++ attr - OPTEE_MSG_ATTR_TYPE_FMEM_INPUT; ++ p->u.memref.size = mp->u.fmem.size; ++ ++ if (mp->u.fmem.global_id != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) ++ shm = optee_shm_from_ffa_handle(optee, mp->u.fmem.global_id); ++ p->u.memref.shm = shm; ++ ++ if (shm) { ++ offs_low = mp->u.fmem.offs_low; ++ offs_high = mp->u.fmem.offs_high; ++ } ++ 
p->u.memref.shm_offs = offs_low | offs_high << 32; ++} ++ ++/** ++ * optee_ffa_from_msg_param() - convert from OPTEE_MSG parameters to ++ * struct tee_param ++ * @optee: main service struct ++ * @params: subsystem internal parameter representation ++ * @num_params: number of elements in the parameter arrays ++ * @msg_params: OPTEE_MSG parameters ++ * ++ * Returns 0 on success or <0 on failure ++ */ ++static int optee_ffa_from_msg_param(struct optee *optee, ++ struct tee_param *params, size_t num_params, ++ const struct optee_msg_param *msg_params) ++{ ++ size_t n; ++ ++ for (n = 0; n < num_params; n++) { ++ struct tee_param *p = params + n; ++ const struct optee_msg_param *mp = msg_params + n; ++ u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK; ++ ++ switch (attr) { ++ case OPTEE_MSG_ATTR_TYPE_NONE: ++ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE; ++ memset(&p->u, 0, sizeof(p->u)); ++ break; ++ case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT: ++ case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT: ++ case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT: ++ from_msg_param_value(p, attr, mp); ++ break; ++ case OPTEE_MSG_ATTR_TYPE_FMEM_INPUT: ++ case OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT: ++ case OPTEE_MSG_ATTR_TYPE_FMEM_INOUT: ++ from_msg_param_ffa_mem(optee, p, attr, mp); ++ break; ++ default: ++ return -EINVAL; ++ } ++ } ++ ++ return 0; ++} ++ ++static int to_msg_param_ffa_mem(struct optee_msg_param *mp, ++ const struct tee_param *p) ++{ ++ struct tee_shm *shm = p->u.memref.shm; ++ ++ mp->attr = OPTEE_MSG_ATTR_TYPE_FMEM_INPUT + p->attr - ++ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT; ++ ++ if (shm) { ++ u64 shm_offs = p->u.memref.shm_offs; ++ ++ mp->u.fmem.internal_offs = shm->offset; ++ ++ mp->u.fmem.offs_low = shm_offs; ++ mp->u.fmem.offs_high = shm_offs >> 32; ++ /* Check that the entire offset could be stored. 
*/ ++ if (mp->u.fmem.offs_high != shm_offs >> 32) ++ return -EINVAL; ++ ++ mp->u.fmem.global_id = shm->sec_world_id; ++ } else { ++ memset(&mp->u, 0, sizeof(mp->u)); ++ mp->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID; ++ } ++ mp->u.fmem.size = p->u.memref.size; ++ ++ return 0; ++} ++ ++/** ++ * optee_ffa_to_msg_param() - convert from struct tee_params to OPTEE_MSG ++ * parameters ++ * @optee: main service struct ++ * @msg_params: OPTEE_MSG parameters ++ * @num_params: number of elements in the parameter arrays ++ * @params: subsystem itnernal parameter representation ++ * Returns 0 on success or <0 on failure ++ */ ++static int optee_ffa_to_msg_param(struct optee *optee, ++ struct optee_msg_param *msg_params, ++ size_t num_params, ++ const struct tee_param *params) ++{ ++ size_t n; ++ ++ for (n = 0; n < num_params; n++) { ++ const struct tee_param *p = params + n; ++ struct optee_msg_param *mp = msg_params + n; ++ ++ switch (p->attr) { ++ case TEE_IOCTL_PARAM_ATTR_TYPE_NONE: ++ mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE; ++ memset(&mp->u, 0, sizeof(mp->u)); ++ break; ++ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: ++ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: ++ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: ++ to_msg_param_value(mp, p); ++ break; ++ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: ++ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: ++ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: ++ if (to_msg_param_ffa_mem(mp, p)) ++ return -EINVAL; ++ break; ++ default: ++ return -EINVAL; ++ } ++ } ++ ++ return 0; ++} ++ ++static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev, ++ const struct ffa_dev_ops *ops) ++{ ++ struct ffa_send_direct_data data = { OPTEE_FFA_GET_API_VERSION }; ++ int rc; ++ ++ ops->mode_32bit_set(ffa_dev); ++ ++ rc = ops->sync_send_receive(ffa_dev, &data); ++ if (rc) { ++ pr_err("Unexpected error %d\n", rc); ++ return false; ++ } ++ if (data.data0 != OPTEE_FFA_VERSION_MAJOR || ++ data.data1 < OPTEE_FFA_VERSION_MINOR) { ++ 
pr_err("Incompatible OP-TEE API version %lu.%lu", ++ data.data0, data.data1); ++ return false; ++ } ++ ++ data = (struct ffa_send_direct_data){ OPTEE_FFA_GET_OS_VERSION }; ++ rc = ops->sync_send_receive(ffa_dev, &data); ++ if (rc) { ++ pr_err("Unexpected error %d\n", rc); ++ return false; ++ } ++ if (data.data2) ++ pr_info("revision %lu.%lu (%08lx)", ++ data.data0, data.data1, data.data2); ++ else ++ pr_info("revision %lu.%lu", data.data0, data.data1); ++ ++ return true; ++} ++ ++static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev, ++ const struct ffa_dev_ops *ops, ++ u32 *sec_caps, unsigned int *rpc_arg_count) ++{ ++ struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES }; ++ int rc; ++ ++ rc = ops->sync_send_receive(ffa_dev, &data); ++ if (rc) { ++ pr_err("Unexpected error %d", rc); ++ return false; ++ } ++ if (data.data0) { ++ pr_err("Unexpected exchange error %lu", data.data0); ++ return false; ++ } ++ ++ *sec_caps = 0; ++ *rpc_arg_count = (u8)data.data1; ++ ++ return true; ++} ++ ++static struct tee_shm_pool *optee_ffa_config_dyn_shm(void) ++{ ++ struct tee_shm_pool_mgr *priv_mgr; ++ struct tee_shm_pool_mgr *dmabuf_mgr; ++ void *rc; ++ ++ rc = optee_ffa_shm_pool_alloc_pages(); ++ if (IS_ERR(rc)) ++ return rc; ++ priv_mgr = rc; ++ ++ rc = optee_ffa_shm_pool_alloc_pages(); ++ if (IS_ERR(rc)) { ++ tee_shm_pool_mgr_destroy(priv_mgr); ++ return rc; ++ } ++ dmabuf_mgr = rc; ++ ++ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); ++ if (IS_ERR(rc)) { ++ tee_shm_pool_mgr_destroy(priv_mgr); ++ tee_shm_pool_mgr_destroy(dmabuf_mgr); ++ } ++ ++ return rc; ++} ++ ++static const struct tee_driver_ops optee_ffa_clnt_ops = { ++ .get_version = optee_ffa_get_version, ++ .open = optee_open, ++ .release = optee_release, ++ .open_session = optee_open_session, ++ .close_session = optee_close_session, ++ .invoke_func = optee_invoke_func, ++ .cancel_req = optee_cancel_req, ++ .shm_register = optee_ffa_shm_register, ++ .shm_unregister = optee_ffa_shm_unregister, 
++}; ++ ++static const struct tee_desc optee_ffa_clnt_desc = { ++ .name = DRIVER_NAME "ffa-clnt", ++ .ops = &optee_ffa_clnt_ops, ++ .owner = THIS_MODULE, ++}; ++ ++static const struct tee_driver_ops optee_ffa_supp_ops = { ++ .get_version = optee_ffa_get_version, ++ .open = optee_open, ++ .release = optee_release_supp, ++ .supp_recv = optee_supp_recv, ++ .supp_send = optee_supp_send, ++ .shm_register = optee_ffa_shm_register, /* same as for clnt ops */ ++ .shm_unregister = optee_ffa_shm_unregister_supp, ++}; ++ ++static const struct tee_desc optee_ffa_supp_desc = { ++ .name = DRIVER_NAME "ffa-supp", ++ .ops = &optee_ffa_supp_ops, ++ .owner = THIS_MODULE, ++ .flags = TEE_DESC_PRIVILEGED, ++}; ++ ++static const struct optee_ops optee_ffa_ops = { ++ .do_call_with_arg = optee_ffa_do_call_with_arg, ++ .to_msg_param = optee_ffa_to_msg_param, ++ .from_msg_param = optee_ffa_from_msg_param, ++}; ++ ++static void optee_ffa_remove(struct ffa_device *ffa_dev) ++{ ++ (void)ffa_dev; ++} ++ ++static int optee_ffa_probe(struct ffa_device *ffa_dev) ++{ ++ const struct ffa_dev_ops *ffa_ops; ++ unsigned int rpc_arg_count; ++ struct tee_device *teedev; ++ struct optee *optee; ++ u32 sec_caps; ++ int rc; ++ ++ ffa_ops = ffa_dev_ops_get(ffa_dev); ++ if (!ffa_ops) { ++ pr_warn("failed \"method\" init: ffa\n"); ++ return -ENOENT; ++ } ++ ++ if (!optee_ffa_api_is_compatbile(ffa_dev, ffa_ops)) ++ return -EINVAL; ++ ++ if (!optee_ffa_exchange_caps(ffa_dev, ffa_ops, &sec_caps, ++ &rpc_arg_count)) ++ return -EINVAL; ++ ++ optee = kzalloc(sizeof(*optee), GFP_KERNEL); ++ if (!optee) { ++ rc = -ENOMEM; ++ goto err; ++ } ++ optee->pool = optee_ffa_config_dyn_shm(); ++ if (IS_ERR(optee->pool)) { ++ rc = PTR_ERR(optee->pool); ++ optee->pool = NULL; ++ goto err; ++ } ++ ++ optee->ops = &optee_ffa_ops; ++ optee->ffa.ffa_dev = ffa_dev; ++ optee->ffa.ffa_ops = ffa_ops; ++ optee->sec_caps = sec_caps; ++ optee->rpc_arg_count = rpc_arg_count; ++ ++ teedev = tee_device_alloc(&optee_ffa_clnt_desc, NULL, 
optee->pool, ++ optee); ++ if (IS_ERR(teedev)) { ++ rc = PTR_ERR(teedev); ++ goto err; ++ } ++ optee->teedev = teedev; ++ ++ teedev = tee_device_alloc(&optee_ffa_supp_desc, NULL, optee->pool, ++ optee); ++ if (IS_ERR(teedev)) { ++ rc = PTR_ERR(teedev); ++ goto err; ++ } ++ optee->supp_teedev = teedev; ++ ++ rc = tee_device_register(optee->teedev); ++ if (rc) ++ goto err; ++ ++ rc = tee_device_register(optee->supp_teedev); ++ if (rc) ++ goto err; ++ ++ rc = rhashtable_init(&optee->ffa.global_ids, &shm_rhash_params); ++ if (rc) ++ goto err; ++ mutex_init(&optee->ffa.mutex); ++ mutex_init(&optee->call_queue.mutex); ++ INIT_LIST_HEAD(&optee->call_queue.waiters); ++ optee_wait_queue_init(&optee->wait_queue); ++ optee_supp_init(&optee->supp); ++ ffa_dev_set_drvdata(ffa_dev, optee); ++ ++ pr_info("initialized driver\n"); ++ return 0; ++err: ++ /* ++ * tee_device_unregister() is safe to call even if the ++ * devices hasn't been registered with ++ * tee_device_register() yet. ++ */ ++ tee_device_unregister(optee->supp_teedev); ++ tee_device_unregister(optee->teedev); ++ if (optee->pool) ++ tee_shm_pool_free(optee->pool); ++ kfree(optee); ++ return rc; ++} ++ ++static const struct ffa_device_id optee_ffa_device_id[] = { ++ /* 486178e0-e7f8-11e3-bc5e0002a5d5c51b */ ++ { UUID_INIT(0x486178e0, 0xe7f8, 0x11e3, ++ 0xbc, 0x5e, 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b) }, ++ {} ++}; ++ ++static struct ffa_driver optee_ffa_driver = { ++ .name = "optee", ++ .probe = optee_ffa_probe, ++ .remove = optee_ffa_remove, ++ .id_table = optee_ffa_device_id, ++}; ++ ++module_ffa_driver(optee_ffa_driver); ++#endif /*CONFIG_ARM_FFA_TRANSPORT*/ ++ + /* Simple wrapper functions to be able to use a function pointer */ + static void optee_smccc_smc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, +@@ -615,7 +1082,8 @@ static int optee_remove(struct platform_device *pdev) + * reference counters and also avoid wild pointers in secure world + * into the old shared memory range. 
+ */ +- optee_disable_shm_cache(optee); ++ if (!optee_is_ffa_based(optee)) ++ optee_disable_shm_cache(optee); + + /* + * The two devices have to be unregistered before we can free the +@@ -631,6 +1099,14 @@ static int optee_remove(struct platform_device *pdev) + optee_supp_uninit(&optee->supp); + mutex_destroy(&optee->call_queue.mutex); + ++#ifdef CONFIG_ARM_FFA_TRANSPORT ++ if (optee->ffa.ffa_ops) { ++ mutex_destroy(&optee->ffa.mutex); ++ rhashtable_free_and_destroy(&optee->ffa.global_ids, ++ rh_free_fn, NULL); ++ } ++#endif /*CONFIG_ARM_FFA_TRANSPORT*/ ++ + kfree(optee); + + return 0; +diff --git a/drivers/tee/optee/optee_ffa.h b/drivers/tee/optee/optee_ffa.h +new file mode 100644 +index 000000000000..ee3a03fc392c +--- /dev/null ++++ b/drivers/tee/optee/optee_ffa.h +@@ -0,0 +1,153 @@ ++/* SPDX-License-Identifier: BSD-2-Clause */ ++/* ++ * Copyright (c) 2019-2021, Linaro Limited ++ */ ++ ++/* ++ * This file is exported by OP-TEE and is kept in sync between secure world ++ * and normal world drivers. We're using ARM FF-A 1.0 specification. ++ */ ++ ++#ifndef __OPTEE_FFA_H ++#define __OPTEE_FFA_H ++ ++#include <linux/arm_ffa.h> ++ ++/* ++ * Normal world sends requests with FFA_MSG_SEND_DIRECT_REQ and ++ * responses are returned with FFA_MSG_SEND_DIRECT_RESP for normal ++ * messages. ++ * ++ * All requests with FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP ++ * are using the AArch32 SMC calling convention with register usage as ++ * defined in FF-A specification: ++ * w0: Function ID (0x8400006F or 0x84000070) ++ * w1: Source/Destination IDs ++ * w2: Reserved (MBZ) ++ * w3-w7: Implementation defined, free to be used below ++ */ ++ ++#define OPTEE_FFA_VERSION_MAJOR 1 ++#define OPTEE_FFA_VERSION_MINOR 0 ++ ++#define OPTEE_FFA_BLOCKING_CALL(id) (id) ++#define OPTEE_FFA_YIELDING_CALL_BIT 31 ++#define OPTEE_FFA_YIELDING_CALL(id) ((id) | BIT(OPTEE_FFA_YIELDING_CALL_BIT)) ++ ++/* ++ * Returns the API version implemented, currently follows the FF-A version. 
++ * Call register usage: ++ * w3: Service ID, OPTEE_FFA_GET_API_VERSION ++ * w4-w7: Not used (MBZ) ++ * ++ * Return register usage: ++ * w3: OPTEE_FFA_VERSION_MAJOR ++ * w4: OPTEE_FFA_VERSION_MINOR ++ * w5-w7: Not used (MBZ) ++ */ ++#define OPTEE_FFA_GET_API_VERSION OPTEE_FFA_BLOCKING_CALL(0) ++ ++/* ++ * Returns the revision of OP-TEE. ++ * ++ * Used by non-secure world to figure out which version of the Trusted OS ++ * is installed. Note that the returned revision is the revision of the ++ * Trusted OS, not of the API. ++ * ++ * Call register usage: ++ * w3: Service ID, OPTEE_FFA_GET_OS_VERSION ++ * w4-w7: Unused (MBZ) ++ * ++ * Return register usage: ++ * w3: CFG_OPTEE_REVISION_MAJOR ++ * w4: CFG_OPTEE_REVISION_MINOR ++ * w5: TEE_IMPL_GIT_SHA1 (or zero if not supported) ++ */ ++#define OPTEE_FFA_GET_OS_VERSION OPTEE_FFA_BLOCKING_CALL(1) ++ ++/* ++ * Exchange capabilities between normal world and secure world. ++ * ++ * Currently there are no defined capabilities. When features are added new ++ * capabilities may be added. ++ * ++ * Call register usage: ++ * w3: Service ID, OPTEE_FFA_EXCHANGE_CAPABILITIES ++ * w4-w7: Note used (MBZ) ++ * ++ * Return register usage: ++ * w3: Error code, 0 on success ++ * w4: Bit[7:0]: Number of parameters needed for RPC to be supplied ++ * as the second MSG arg struct for ++ * OPTEE_FFA_YIELDING_CALL_WITH_ARG. 
++ * Bit[31:8]: Reserved (MBZ) ++ * w5-w7: Note used (MBZ) ++ */ ++#define OPTEE_FFA_EXCHANGE_CAPABILITIES OPTEE_FFA_BLOCKING_CALL(2) ++ ++/* ++ * Unregister shared memory ++ * ++ * Call register usage: ++ * w3: Service ID, OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM ++ * w4: Shared memory handle, lower bits ++ * w5: Shared memory handle, higher bits ++ * w6-w7: Not used (MBZ) ++ * ++ * Return register usage: ++ * w3: Error code, 0 on success ++ * w4-w7: Note used (MBZ) ++ */ ++#define OPTEE_FFA_UNREGISTER_SHM OPTEE_FFA_BLOCKING_CALL(3) ++ ++/* ++ * Call with struct optee_msg_arg as argument in the supplied shared memory ++ * with a zero internal offset and normal cached memory attributes. ++ * Register usage: ++ * w3: Service ID, OPTEE_FFA_YIELDING_CALL_WITH_ARG ++ * w4: Lower 32 bits of a 64-bit Shared memory handle ++ * w5: Upper 32 bits of a 64-bit Shared memory handle ++ * w6: Offset into shared memory pointing to a struct optee_msg_arg ++ * right after the parameters of this struct (at offset ++ * OPTEE_MSG_GET_ARG_SIZE(num_params) follows a struct optee_msg_arg ++ * for RPC, this struct has reserved space for the number of RPC ++ * parameters as returned by OPTEE_FFA_EXCHANGE_CAPABILITIES. ++ * w7: Not used (MBZ) ++ * Resume from RPC. Register usage: ++ * w3: Service ID, OPTEE_FFA_YIELDING_CALL_RESUME ++ * w4-w6: Not used (MBZ) ++ * w7: Resume info ++ * ++ * Normal return (yielding call is completed). Register usage: ++ * w3: Error code, 0 on success ++ * w4: OPTEE_FFA_YIELDING_CALL_RETURN_DONE ++ * w5-w7: Not used (MBZ) ++ * ++ * RPC interrupt return (RPC from secure world). 
Register usage: ++ * w3: Error code == 0 ++ * w4: Any defined RPC code but OPTEE_FFA_YIELDING_CALL_RETURN_DONE ++ * w5-w6: Not used (MBZ) ++ * w7: Resume info ++ * ++ * Possible error codes in register w3: ++ * 0: Success ++ * FFA_DENIED: w4 isn't one of OPTEE_FFA_YIELDING_CALL_START ++ * OPTEE_FFA_YIELDING_CALL_RESUME ++ * ++ * Possible error codes for OPTEE_FFA_YIELDING_CALL_START, ++ * FFA_BUSY: Number of OP-TEE OS threads exceeded, ++ * try again later ++ * FFA_DENIED: RPC shared memory object not found ++ * FFA_INVALID_PARAMETER: Bad shared memory handle or offset into the memory ++ * ++ * Possible error codes for OPTEE_FFA_YIELDING_CALL_RESUME ++ * FFA_INVALID_PARAMETER: Bad resume info ++ */ ++#define OPTEE_FFA_YIELDING_CALL_WITH_ARG OPTEE_FFA_YIELDING_CALL(0) ++#define OPTEE_FFA_YIELDING_CALL_RESUME OPTEE_FFA_YIELDING_CALL(1) ++ ++#define OPTEE_FFA_YIELDING_CALL_RETURN_DONE 0 ++#define OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD 1 ++#define OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT 2 ++ ++#endif /*__OPTEE_FFA_H*/ +diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h +index 5bef6a0165db..1ee943980c68 100644 +--- a/drivers/tee/optee/optee_msg.h ++++ b/drivers/tee/optee/optee_msg.h +@@ -28,6 +28,9 @@ + #define OPTEE_MSG_ATTR_TYPE_RMEM_INPUT 0x5 + #define OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT 0x6 + #define OPTEE_MSG_ATTR_TYPE_RMEM_INOUT 0x7 ++#define OPTEE_MSG_ATTR_TYPE_FMEM_INPUT OPTEE_MSG_ATTR_TYPE_RMEM_INPUT ++#define OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT ++#define OPTEE_MSG_ATTR_TYPE_FMEM_INOUT OPTEE_MSG_ATTR_TYPE_RMEM_INOUT + #define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT 0x9 + #define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT 0xa + #define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT 0xb +@@ -96,6 +99,8 @@ + */ + #define OPTEE_MSG_NONCONTIG_PAGE_SIZE 4096 + ++#define OPTEE_MSG_FMEM_INVALID_GLOBAL_ID 0xffffffffffffffff ++ + /** + * struct optee_msg_param_tmem - temporary memory reference parameter + * @buf_ptr: Address of the buffer +@@ -127,6 +132,23 
@@ struct optee_msg_param_rmem { + u64 shm_ref; + }; + ++/** ++ * struct optee_msg_param_fmem - ffa memory reference parameter ++ * @offs_lower: Lower bits of offset into shared memory reference ++ * @offs_upper: Upper bits of offset into shared memory reference ++ * @internal_offs: Internal offset into the first page of shared memory ++ * reference ++ * @size: Size of the buffer ++ * @global_id: Global identifier of Shared memory ++ */ ++struct optee_msg_param_fmem { ++ u32 offs_low; ++ u16 offs_high; ++ u16 internal_offs; ++ u64 size; ++ u64 global_id; ++}; ++ + /** + * struct optee_msg_param_value - opaque value parameter + * +@@ -143,13 +165,15 @@ struct optee_msg_param_value { + * @attr: attributes + * @tmem: parameter by temporary memory reference + * @rmem: parameter by registered memory reference ++ * @fmem: parameter by ffa registered memory reference + * @value: parameter by opaque value + * @octets: parameter by octet string + * + * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in + * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value or octets, + * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and +- * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem, ++ * OPTEE_MSG_ATTR_TYPE_RMEM_* or the alias PTEE_MSG_ATTR_TYPE_FMEM_* indicates ++ * @rmem or @fmem depending on the conduit. + * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used. 
+ */ + struct optee_msg_param { +@@ -157,6 +181,7 @@ struct optee_msg_param { + union { + struct optee_msg_param_tmem tmem; + struct optee_msg_param_rmem rmem; ++ struct optee_msg_param_fmem fmem; + struct optee_msg_param_value value; + u8 octets[24]; + } u; +diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h +index c5741e96e967..1ffe74e66d15 100644 +--- a/drivers/tee/optee/optee_private.h ++++ b/drivers/tee/optee/optee_private.h +@@ -7,6 +7,7 @@ + #define OPTEE_PRIVATE_H + + #include <linux/arm-smccc.h> ++#include <linux/rhashtable.h> + #include <linux/semaphore.h> + #include <linux/tee_drv.h> + #include <linux/types.h> +@@ -20,6 +21,7 @@ + #define TEEC_ERROR_NOT_SUPPORTED 0xFFFF000A + #define TEEC_ERROR_COMMUNICATION 0xFFFF000E + #define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C ++#define TEEC_ERROR_BUSY 0xFFFF000D + #define TEEC_ERROR_SHORT_BUFFER 0xFFFF0010 + + #define TEEC_ORIGIN_COMMS 0x00000002 +@@ -66,6 +68,22 @@ struct optee_supp { + struct completion reqs_c; + }; + ++/** ++ * struct optee_ffa_data - FFA communication struct ++ * @ffa_dev FFA device, contains the destination id, the id of ++ * OP-TEE in secure world ++ * @ffa_ops FFA operations ++ * @mutex Serializes access to @global_ids ++ * @global_ids FF-A shared memory global handle translation ++ */ ++struct optee_ffa { ++ struct ffa_device *ffa_dev; ++ const struct ffa_dev_ops *ffa_ops; ++ /* Serializes access to @global_ids */ ++ struct mutex mutex; ++ struct rhashtable global_ids; ++}; ++ + struct optee; + + /** +@@ -113,11 +131,15 @@ struct optee { + struct tee_device *teedev; + const struct optee_ops *ops; + optee_invoke_fn *invoke_fn; ++#ifdef CONFIG_ARM_FFA_TRANSPORT ++ struct optee_ffa ffa; ++#endif + struct optee_call_queue call_queue; + struct optee_wait_queue wait_queue; + struct optee_supp supp; + struct tee_shm_pool *pool; + void *memremaped_shm; ++ unsigned int rpc_arg_count; + u32 sec_caps; + bool scan_bus_done; + struct workqueue_struct *scan_bus_wq; +@@ 
-206,6 +228,36 @@ void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages, + #define PTA_CMD_GET_DEVICES_SUPP 0x1 + int optee_enumerate_devices(u32 func); + ++int optee_shm_add_ffa_handle(struct optee *optee, struct tee_shm *shm, ++ u64 global_id); ++int optee_shm_rem_ffa_handle(struct optee *optee, u64 global_id); ++ ++struct tee_shm *optee_shm_from_ffa_handle(struct optee *optee, u64 global_id); ++ ++int optee_ffa_shm_register(struct tee_context *ctx, struct tee_shm *shm, ++ struct page **pages, size_t num_pages, ++ unsigned long start); ++int optee_ffa_shm_unregister(struct tee_context *ctx, struct tee_shm *shm); ++int optee_ffa_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm, ++ struct page **pages, size_t num_pages, ++ unsigned long start); ++int optee_ffa_shm_unregister_supp(struct tee_context *ctx, ++ struct tee_shm *shm); ++ ++int optee_ffa_do_call_with_arg(struct tee_context *ctx, struct tee_shm *arg); ++int optee_ffa_rpc_shm_register(struct tee_context *ctx, struct tee_shm *shm); ++void optee_handle_ffa_rpc(struct tee_context *ctx, u32 cmd, ++ struct optee_msg_arg *arg); ++ ++static inline bool optee_is_ffa_based(struct optee *optee) ++{ ++#ifdef CONFIG_ARM_FFA_TRANSPORT ++ return optee->ffa.ffa_ops; ++#else ++ return false; ++#endif ++} ++ + /* + * Small helpers + */ +diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c +index 39562fb6841e..865a9ab3cf65 100644 +--- a/drivers/tee/optee/rpc.c ++++ b/drivers/tee/optee/rpc.c +@@ -10,6 +10,7 @@ + #include <linux/i2c.h> + #include <linux/slab.h> + #include <linux/tee_drv.h> ++#include "optee_ffa.h" + #include "optee_private.h" + #include "optee_smc.h" + #include "optee_rpc_cmd.h" +@@ -543,3 +544,120 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param, + + param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC; + } ++ ++#ifdef CONFIG_ARM_FFA_TRANSPORT ++static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx, ++ struct optee_msg_arg *arg) 
++{ ++ struct tee_shm *shm; ++ ++ if (arg->num_params != 1 || ++ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) { ++ arg->ret = TEEC_ERROR_BAD_PARAMETERS; ++ return; ++ } ++ ++ switch (arg->params[0].u.value.a) { ++ case OPTEE_RPC_SHM_TYPE_APPL: ++ shm = cmd_alloc_suppl(ctx, arg->params[0].u.value.b); ++ break; ++ case OPTEE_RPC_SHM_TYPE_KERNEL: ++ shm = tee_shm_alloc(ctx, arg->params[0].u.value.b, ++ TEE_SHM_MAPPED); ++ break; ++ default: ++ arg->ret = TEEC_ERROR_BAD_PARAMETERS; ++ return; ++ } ++ ++ if (IS_ERR(shm)) { ++ arg->ret = TEEC_ERROR_OUT_OF_MEMORY; ++ return; ++ } ++ ++ arg->params[0] = (struct optee_msg_param){ ++ .attr = OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT, ++ .u.fmem.size = tee_shm_get_size(shm), ++ .u.fmem.global_id = shm->sec_world_id, ++ .u.fmem.internal_offs = shm->offset, ++ }; ++ ++ arg->ret = TEEC_SUCCESS; ++} ++ ++static void handle_ffa_rpc_func_cmd_shm_free(struct tee_context *ctx, ++ struct optee *optee, ++ struct optee_msg_arg *arg) ++{ ++ struct tee_shm *shm; ++ ++ if (arg->num_params != 1 || ++ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) ++ goto err_bad_param; ++ ++ shm = optee_shm_from_ffa_handle(optee, arg->params[0].u.value.b); ++ if (!shm) ++ goto err_bad_param; ++ switch (arg->params[0].u.value.a) { ++ case OPTEE_RPC_SHM_TYPE_APPL: ++ cmd_free_suppl(ctx, shm); ++ break; ++ case OPTEE_RPC_SHM_TYPE_KERNEL: ++ tee_shm_free(shm); ++ break; ++ default: ++ goto err_bad_param; ++ } ++ arg->ret = TEEC_SUCCESS; ++ return; ++ ++err_bad_param: ++ arg->ret = TEEC_ERROR_BAD_PARAMETERS; ++} ++ ++static void handle_ffa_rpc_func_cmd(struct tee_context *ctx, ++ struct optee_msg_arg *arg) ++{ ++ struct optee *optee = tee_get_drvdata(ctx->teedev); ++ ++ arg->ret_origin = TEEC_ORIGIN_COMMS; ++ switch (arg->cmd) { ++ case OPTEE_RPC_CMD_GET_TIME: ++ handle_rpc_func_cmd_get_time(arg); ++ break; ++ case OPTEE_RPC_CMD_WAIT_QUEUE: ++ handle_rpc_func_cmd_wq(optee, arg); ++ break; ++ case OPTEE_RPC_CMD_SUSPEND: ++ handle_rpc_func_cmd_wait(arg); 
++ break; ++ case OPTEE_RPC_CMD_SHM_ALLOC: ++ handle_ffa_rpc_func_cmd_shm_alloc(ctx, arg); ++ break; ++ case OPTEE_RPC_CMD_SHM_FREE: ++ handle_ffa_rpc_func_cmd_shm_free(ctx, optee, arg); ++ break; ++ case OPTEE_RPC_CMD_I2C_TRANSFER: ++ handle_rpc_func_cmd_i2c_transfer(ctx, arg); ++ break; ++ default: ++ handle_rpc_supp_cmd(ctx, optee, arg); ++ } ++} ++ ++void optee_handle_ffa_rpc(struct tee_context *ctx, u32 cmd, ++ struct optee_msg_arg *arg) ++{ ++ switch (cmd) { ++ case OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD: ++ handle_ffa_rpc_func_cmd(ctx, arg); ++ break; ++ case OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT: ++ /* Interrupt delivered by now */ ++ break; ++ default: ++ pr_warn("Unknown RPC func 0x%x\n", cmd); ++ break; ++ } ++} ++#endif /*CONFIG_ARM_FFA_TRANSPORT*/ +-- +2.17.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0015-coresight-etm4x-Save-restore-TRFCR_EL1.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0015-coresight-etm4x-Save-restore-TRFCR_EL1.patch new file mode 100644 index 0000000000..e67658fada --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0015-coresight-etm4x-Save-restore-TRFCR_EL1.patch @@ -0,0 +1,181 @@ +From 7150eac72ee0c2c7da03f53a90a871c3d6d4e538 Mon Sep 17 00:00:00 2001 +From: Suzuki K Poulose <suzuki.poulose@arm.com> +Date: Tue, 14 Sep 2021 11:26:32 +0100 +Subject: [PATCH 1/2] coresight: etm4x: Save restore TRFCR_EL1 + +When the CPU enters a low power mode, the TRFCR_EL1 contents could be +reset. Thus we need to save/restore the TRFCR_EL1 along with the ETM4x +registers to allow the tracing. + +The TRFCR related helpers are in a new header file, as we need to use +them for TRBE in the later patches. 
+ +Cc: Mathieu Poirier <mathieu.poirier@linaro.org> +Cc: Anshuman Khandual <anshuman.khandual@arm.com> +Cc: Mike Leach <mike.leach@linaro.org> +Cc: Leo Yan <leo.yan@linaro.org> +Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com> +Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com> +Link: https://lore.kernel.org/r/20210914102641.1852544-2-suzuki.poulose@arm.com +[Fixed cosmetic details] +Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org> + +Upstream-Status: Backport [https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=937d3f58cacf377cab7c32e475e1ffa91d611dce] +Signed-off-by: Davidson K <davidson.kumaresan@arm.com> +--- + .../coresight/coresight-etm4x-core.c | 43 +++++++++++++------ + drivers/hwtracing/coresight/coresight-etm4x.h | 2 + + .../coresight/coresight-self-hosted-trace.h | 24 +++++++++++ + 3 files changed, 57 insertions(+), 12 deletions(-) + create mode 100644 drivers/hwtracing/coresight/coresight-self-hosted-trace.h + +diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c +index 90827077d2f9..b78080d169f8 100644 +--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c ++++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c +@@ -39,6 +39,7 @@ + + #include "coresight-etm4x.h" + #include "coresight-etm-perf.h" ++#include "coresight-self-hosted-trace.h" + + static int boot_enable; + module_param(boot_enable, int, 0444); +@@ -990,7 +991,7 @@ static void cpu_enable_tracing(struct etmv4_drvdata *drvdata) + if (is_kernel_in_hyp_mode()) + trfcr |= TRFCR_EL2_CX; + +- write_sysreg_s(trfcr, SYS_TRFCR_EL1); ++ write_trfcr(trfcr); + } + + static void etm4_init_arch_data(void *info) +@@ -1528,7 +1529,7 @@ static void etm4_init_trace_id(struct etmv4_drvdata *drvdata) + drvdata->trcid = coresight_get_trace_id(drvdata->cpu); + } + +-static int etm4_cpu_save(struct etmv4_drvdata *drvdata) ++static int __etm4_cpu_save(struct etmv4_drvdata *drvdata) + { + int i, 
ret = 0; + struct etmv4_save_state *state; +@@ -1667,7 +1668,23 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata) + return ret; + } + +-static void etm4_cpu_restore(struct etmv4_drvdata *drvdata) ++static int etm4_cpu_save(struct etmv4_drvdata *drvdata) ++{ ++ int ret = 0; ++ ++ /* Save the TRFCR irrespective of whether the ETM is ON */ ++ if (drvdata->trfc) ++ drvdata->save_trfcr = read_trfcr(); ++ /* ++ * Save and restore the ETM Trace registers only if ++ * the ETM is active. ++ */ ++ if (local_read(&drvdata->mode) && drvdata->save_state) ++ ret = __etm4_cpu_save(drvdata); ++ return ret; ++} ++ ++static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata) + { + int i; + struct etmv4_save_state *state = drvdata->save_state; +@@ -1763,6 +1780,14 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata) + etm4_cs_lock(drvdata, csa); + } + ++static void etm4_cpu_restore(struct etmv4_drvdata *drvdata) ++{ ++ if (drvdata->trfc) ++ write_trfcr(drvdata->save_trfcr); ++ if (drvdata->state_needs_restore) ++ __etm4_cpu_restore(drvdata); ++} ++ + static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd, + void *v) + { +@@ -1774,23 +1799,17 @@ static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd, + + drvdata = etmdrvdata[cpu]; + +- if (!drvdata->save_state) +- return NOTIFY_OK; +- + if (WARN_ON_ONCE(drvdata->cpu != cpu)) + return NOTIFY_BAD; + + switch (cmd) { + case CPU_PM_ENTER: +- /* save the state if self-hosted coresight is in use */ +- if (local_read(&drvdata->mode)) +- if (etm4_cpu_save(drvdata)) +- return NOTIFY_BAD; ++ if (etm4_cpu_save(drvdata)) ++ return NOTIFY_BAD; + break; + case CPU_PM_EXIT: + case CPU_PM_ENTER_FAILED: +- if (drvdata->state_needs_restore) +- etm4_cpu_restore(drvdata); ++ etm4_cpu_restore(drvdata); + break; + default: + return NOTIFY_DONE; +diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h +index e5b79bdb9851..82cba16b73a6 100644 +--- 
a/drivers/hwtracing/coresight/coresight-etm4x.h ++++ b/drivers/hwtracing/coresight/coresight-etm4x.h +@@ -921,6 +921,7 @@ struct etmv4_save_state { + * @lpoverride: If the implementation can support low-power state over. + * @trfc: If the implementation supports Arm v8.4 trace filter controls. + * @config: structure holding configuration parameters. ++ * @save_trfcr: Saved TRFCR_EL1 register during a CPU PM event. + * @save_state: State to be preserved across power loss + * @state_needs_restore: True when there is context to restore after PM exit + * @skip_power_up: Indicates if an implementation can skip powering up +@@ -973,6 +974,7 @@ struct etmv4_drvdata { + bool lpoverride; + bool trfc; + struct etmv4_config config; ++ u64 save_trfcr; + struct etmv4_save_state *save_state; + bool state_needs_restore; + bool skip_power_up; +diff --git a/drivers/hwtracing/coresight/coresight-self-hosted-trace.h b/drivers/hwtracing/coresight/coresight-self-hosted-trace.h +new file mode 100644 +index 000000000000..303d71911870 +--- /dev/null ++++ b/drivers/hwtracing/coresight/coresight-self-hosted-trace.h +@@ -0,0 +1,24 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Arm v8 Self-Hosted trace support. ++ * ++ * Copyright (C) 2021 ARM Ltd. 
++ */ ++ ++#ifndef __CORESIGHT_SELF_HOSTED_TRACE_H ++#define __CORESIGHT_SELF_HOSTED_TRACE_H ++ ++#include <asm/sysreg.h> ++ ++static inline u64 read_trfcr(void) ++{ ++ return read_sysreg_s(SYS_TRFCR_EL1); ++} ++ ++static inline void write_trfcr(u64 val) ++{ ++ write_sysreg_s(val, SYS_TRFCR_EL1); ++ isb(); ++} ++ ++#endif /* __CORESIGHT_SELF_HOSTED_TRACE_H */ +-- +2.34.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0016-coresight-etm4x-Use-Trace-Filtering-controls-dynamic.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0016-coresight-etm4x-Use-Trace-Filtering-controls-dynamic.patch new file mode 100644 index 0000000000..4d5a719b9d --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0016-coresight-etm4x-Use-Trace-Filtering-controls-dynamic.patch @@ -0,0 +1,227 @@ +From 55228b0522bfb7d945019a8931742ab9b063b6c9 Mon Sep 17 00:00:00 2001 +From: Suzuki K Poulose <suzuki.poulose@arm.com> +Date: Tue, 14 Sep 2021 11:26:33 +0100 +Subject: [PATCH 2/2] coresight: etm4x: Use Trace Filtering controls + dynamically + +The Trace Filtering support (FEAT_TRF) ensures that the ETM +can be prohibited from generating any trace for a given EL. +This is much stricter knob, than the TRCVICTLR exception level +masks, which doesn't prevent the ETM from generating Context +packets for an "excluded" EL. At the moment, we do a onetime +enable trace at user and kernel and leave it untouched for the +kernel life time. This implies that the ETM could potentially +generate trace packets containing the kernel addresses, and +thus leaking the kernel virtual address in the trace. + +This patch makes the switch dynamic, by honoring the filters +set by the user and enforcing them in the TRFCR controls. +We also rename the cpu_enable_tracing() appropriately to +cpu_detect_trace_filtering() and the drvdata member +trfc => trfcr to indicate the "value" of the TRFCR_EL1. 
+ +Cc: Mathieu Poirier <mathieu.poirier@linaro.org> +Cc: Al Grant <al.grant@arm.com> +Cc: Mike Leach <mike.leach@linaro.org> +Cc: Leo Yan <leo.yan@linaro.org> +Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com> +Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com> +Link: https://lore.kernel.org/r/20210914102641.1852544-3-suzuki.poulose@arm.com +Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org> + +Upstream-Status: Backport [https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=5f6fd1aa8cc147b111af1a833574487a87237dc0] +Signed-off-by: Davidson K <davidson.kumaresan@arm.com> +--- + .../coresight/coresight-etm4x-core.c | 63 ++++++++++++++----- + drivers/hwtracing/coresight/coresight-etm4x.h | 7 ++- + .../coresight/coresight-self-hosted-trace.h | 7 +++ + 3 files changed, 59 insertions(+), 18 deletions(-) + +diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c +index b78080d169f8..b804d4413b43 100644 +--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c ++++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c +@@ -237,6 +237,45 @@ struct etm4_enable_arg { + int rc; + }; + ++/* ++ * etm4x_prohibit_trace - Prohibit the CPU from tracing at all ELs. ++ * When the CPU supports FEAT_TRF, we could move the ETM to a trace ++ * prohibited state by filtering the Exception levels via TRFCR_EL1. ++ */ ++static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata) ++{ ++ /* If the CPU doesn't support FEAT_TRF, nothing to do */ ++ if (!drvdata->trfcr) ++ return; ++ cpu_prohibit_trace(); ++} ++ ++/* ++ * etm4x_allow_trace - Allow CPU tracing in the respective ELs, ++ * as configured by the drvdata->config.mode for the current ++ * session. Even though we have TRCVICTLR bits to filter the ++ * trace in the ELs, it doesn't prevent the ETM from generating ++ * a packet (e.g, TraceInfo) that might contain the addresses from ++ * the excluded levels. 
Thus we use the additional controls provided ++ * via the Trace Filtering controls (FEAT_TRF) to make sure no trace ++ * is generated for the excluded ELs. ++ */ ++static void etm4x_allow_trace(struct etmv4_drvdata *drvdata) ++{ ++ u64 trfcr = drvdata->trfcr; ++ ++ /* If the CPU doesn't support FEAT_TRF, nothing to do */ ++ if (!trfcr) ++ return; ++ ++ if (drvdata->config.mode & ETM_MODE_EXCL_KERN) ++ trfcr &= ~TRFCR_ELx_ExTRE; ++ if (drvdata->config.mode & ETM_MODE_EXCL_USER) ++ trfcr &= ~TRFCR_ELx_E0TRE; ++ ++ write_trfcr(trfcr); ++} ++ + #ifdef CONFIG_ETM4X_IMPDEF_FEATURE + + #define HISI_HIP08_AMBA_ID 0x000b6d01 +@@ -441,6 +480,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) + if (etm4x_is_ete(drvdata)) + etm4x_relaxed_write32(csa, TRCRSR_TA, TRCRSR); + ++ etm4x_allow_trace(drvdata); + /* Enable the trace unit */ + etm4x_relaxed_write32(csa, 1, TRCPRGCTLR); + +@@ -724,7 +764,6 @@ static int etm4_enable(struct coresight_device *csdev, + static void etm4_disable_hw(void *info) + { + u32 control; +- u64 trfcr; + struct etmv4_drvdata *drvdata = info; + struct etmv4_config *config = &drvdata->config; + struct coresight_device *csdev = drvdata->csdev; +@@ -751,12 +790,7 @@ static void etm4_disable_hw(void *info) + * If the CPU supports v8.4 Trace filter Control, + * set the ETM to trace prohibited region. 
+ */ +- if (drvdata->trfc) { +- trfcr = read_sysreg_s(SYS_TRFCR_EL1); +- write_sysreg_s(trfcr & ~(TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE), +- SYS_TRFCR_EL1); +- isb(); +- } ++ etm4x_prohibit_trace(drvdata); + /* + * Make sure everything completes before disabling, as recommended + * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register, +@@ -772,9 +806,6 @@ static void etm4_disable_hw(void *info) + if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) + dev_err(etm_dev, + "timeout while waiting for PM stable Trace Status\n"); +- if (drvdata->trfc) +- write_sysreg_s(trfcr, SYS_TRFCR_EL1); +- + /* read the status of the single shot comparators */ + for (i = 0; i < drvdata->nr_ss_cmp; i++) { + config->ss_status[i] = +@@ -969,15 +1000,15 @@ static bool etm4_init_csdev_access(struct etmv4_drvdata *drvdata, + return false; + } + +-static void cpu_enable_tracing(struct etmv4_drvdata *drvdata) ++static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata) + { + u64 dfr0 = read_sysreg(id_aa64dfr0_el1); + u64 trfcr; + ++ drvdata->trfcr = 0; + if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRACE_FILT_SHIFT)) + return; + +- drvdata->trfc = true; + /* + * If the CPU supports v8.4 SelfHosted Tracing, enable + * tracing at the kernel EL and EL0, forcing to use the +@@ -991,7 +1022,7 @@ static void cpu_enable_tracing(struct etmv4_drvdata *drvdata) + if (is_kernel_in_hyp_mode()) + trfcr |= TRFCR_EL2_CX; + +- write_trfcr(trfcr); ++ drvdata->trfcr = trfcr; + } + + static void etm4_init_arch_data(void *info) +@@ -1177,7 +1208,7 @@ static void etm4_init_arch_data(void *info) + /* NUMCNTR, bits[30:28] number of counters available for tracing */ + drvdata->nr_cntr = BMVAL(etmidr5, 28, 30); + etm4_cs_lock(drvdata, csa); +- cpu_enable_tracing(drvdata); ++ cpu_detect_trace_filtering(drvdata); + } + + static inline u32 etm4_get_victlr_access_type(struct etmv4_config *config) +@@ -1673,7 +1704,7 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata) + 
int ret = 0; + + /* Save the TRFCR irrespective of whether the ETM is ON */ +- if (drvdata->trfc) ++ if (drvdata->trfcr) + drvdata->save_trfcr = read_trfcr(); + /* + * Save and restore the ETM Trace registers only if +@@ -1782,7 +1813,7 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata) + + static void etm4_cpu_restore(struct etmv4_drvdata *drvdata) + { +- if (drvdata->trfc) ++ if (drvdata->trfcr) + write_trfcr(drvdata->save_trfcr); + if (drvdata->state_needs_restore) + __etm4_cpu_restore(drvdata); +diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h +index 82cba16b73a6..3c4d69b096ca 100644 +--- a/drivers/hwtracing/coresight/coresight-etm4x.h ++++ b/drivers/hwtracing/coresight/coresight-etm4x.h +@@ -919,7 +919,10 @@ struct etmv4_save_state { + * @nooverflow: Indicate if overflow prevention is supported. + * @atbtrig: If the implementation can support ATB triggers + * @lpoverride: If the implementation can support low-power state over. +- * @trfc: If the implementation supports Arm v8.4 trace filter controls. ++ * @trfcr: If the CPU supports FEAT_TRF, value of the TRFCR_ELx that ++ * allows tracing at all ELs. We don't want to compute this ++ * at runtime, due to the additional setting of TRFCR_CX when ++ * in EL2. Otherwise, 0. + * @config: structure holding configuration parameters. + * @save_trfcr: Saved TRFCR_EL1 register during a CPU PM event. 
+ * @save_state: State to be preserved across power loss +@@ -972,7 +975,7 @@ struct etmv4_drvdata { + bool nooverflow; + bool atbtrig; + bool lpoverride; +- bool trfc; ++ u64 trfcr; + struct etmv4_config config; + u64 save_trfcr; + struct etmv4_save_state *save_state; +diff --git a/drivers/hwtracing/coresight/coresight-self-hosted-trace.h b/drivers/hwtracing/coresight/coresight-self-hosted-trace.h +index 303d71911870..23f05df3f173 100644 +--- a/drivers/hwtracing/coresight/coresight-self-hosted-trace.h ++++ b/drivers/hwtracing/coresight/coresight-self-hosted-trace.h +@@ -21,4 +21,11 @@ static inline void write_trfcr(u64 val) + isb(); + } + ++static inline void cpu_prohibit_trace(void) ++{ ++ u64 trfcr = read_trfcr(); ++ ++ /* Prohibit tracing at EL0 & the kernel EL */ ++ write_trfcr(trfcr & ~(TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE)); ++} + #endif /* __CORESIGHT_SELF_HOSTED_TRACE_H */ +-- +2.34.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0017-perf-arm-cmn-Use-irq_set_affinity.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0017-perf-arm-cmn-Use-irq_set_affinity.patch new file mode 100644 index 0000000000..e8674c33ea --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0017-perf-arm-cmn-Use-irq_set_affinity.patch @@ -0,0 +1,74 @@ +From ad3c5d9224ffcd7b2e083f03441c6188d2bbef67 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner <tglx@linutronix.de> +Date: Tue, 18 May 2021 11:17:28 +0200 +Subject: [PATCH 01/14] perf/arm-cmn: Use irq_set_affinity() + +The driver uses irq_set_affinity_hint() to set the affinity for the PMU +interrupts, which relies on the undocumented side effect that this function +actually sets the affinity under the hood. 
+ +Setting an hint is clearly not a guarantee and for these PMU interrupts an +affinity hint, which is supposed to guide userspace for setting affinity, +is beyond pointless, because the affinity of these interrupts cannot be +modified from user space. + +Aside of that the error checks are bogus because the only error which is +returned from irq_set_affinity_hint() is when there is no irq descriptor +for the interrupt number, but not when the affinity set fails. That's on +purpose because the hint can point to an offline CPU. + +Replace the mindless abuse with irq_set_affinity(). + +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> + +Link: https://lore.kernel.org/r/20210518093118.277228577@linutronix.de +Signed-off-by: Will Deacon <will@kernel.org> + +Upstream-Status: Backport [https://lore.kernel.org/r/20210518093118.277228577@linutronix.de] +Signed-off-by: Rupinderjit Singh <rupinderjit.singh@arm.com> +--- + drivers/perf/arm-cmn.c | 9 ++------- + 1 file changed, 2 insertions(+), 7 deletions(-) + +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c +index 46defb1dcf86..38fa6f89d0bc 100644 +--- a/drivers/perf/arm-cmn.c ++++ b/drivers/perf/arm-cmn.c +@@ -1162,7 +1162,7 @@ static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) + + perf_pmu_migrate_context(&cmn->pmu, cpu, target); + for (i = 0; i < cmn->num_dtcs; i++) +- irq_set_affinity_hint(cmn->dtc[i].irq, cpumask_of(target)); ++ irq_set_affinity(cmn->dtc[i].irq, cpumask_of(target)); + cmn->cpu = target; + return 0; + } +@@ -1222,7 +1222,7 @@ static int arm_cmn_init_irqs(struct arm_cmn *cmn) + if (err) + return err; + +- err = irq_set_affinity_hint(irq, cpumask_of(cmn->cpu)); ++ err = irq_set_affinity(irq, cpumask_of(cmn->cpu)); + if (err) + return err; + next: +@@ -1568,16 +1568,11 @@ static int arm_cmn_probe(struct platform_device *pdev) + static int arm_cmn_remove(struct platform_device *pdev) + { + struct arm_cmn *cmn = platform_get_drvdata(pdev); +- int i; + + 
writel_relaxed(0, cmn->dtc[0].base + CMN_DT_DTC_CTL); + + perf_pmu_unregister(&cmn->pmu); + cpuhp_state_remove_instance(arm_cmn_hp_state, &cmn->cpuhp_node); +- +- for (i = 0; i < cmn->num_dtcs; i++) +- irq_set_affinity_hint(cmn->dtc[i].irq, NULL); +- + return 0; + } + +-- +2.25.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0018-perf-arm-cmn-Fix-CPU-hotplug-unregistration.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0018-perf-arm-cmn-Fix-CPU-hotplug-unregistration.patch new file mode 100644 index 0000000000..e06fb888bf --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0018-perf-arm-cmn-Fix-CPU-hotplug-unregistration.patch @@ -0,0 +1,46 @@ +From 249304c3517a38863c8e45e63d509d01bd67dead Mon Sep 17 00:00:00 2001 +From: Robin Murphy <robin.murphy@arm.com> +Date: Fri, 3 Dec 2021 11:44:50 +0000 +Subject: [PATCH 02/14] perf/arm-cmn: Fix CPU hotplug unregistration + +Attempting to migrate the PMU context after we've unregistered the PMU +device, or especially if we never successfully registered it in the +first place, is a woefully bad idea. It's also fundamentally pointless +anyway. Make sure to unregister an instance from the hotplug handler +*without* invoking the teardown callback. 
+ +Fixes: 0ba64770a2f2 ("perf: Add Arm CMN-600 PMU driver") +Signed-off-by: Robin Murphy <robin.murphy@arm.com> + +Upstream-Status: Backport [https://lore.kernel.org/r/2c221d745544774e4b07583b65b5d4d94f7e0fe4.1638530442.git.robin.murphy@arm.com] +Signed-off-by: Rupinderjit Singh <rupinderjit.singh@arm.com> +--- + drivers/perf/arm-cmn.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c +index 38fa6f89d0bc..fe7f3e945481 100644 +--- a/drivers/perf/arm-cmn.c ++++ b/drivers/perf/arm-cmn.c +@@ -1561,7 +1561,8 @@ static int arm_cmn_probe(struct platform_device *pdev) + + err = perf_pmu_register(&cmn->pmu, name, -1); + if (err) +- cpuhp_state_remove_instance(arm_cmn_hp_state, &cmn->cpuhp_node); ++ cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node); ++ + return err; + } + +@@ -1572,7 +1573,7 @@ static int arm_cmn_remove(struct platform_device *pdev) + writel_relaxed(0, cmn->dtc[0].base + CMN_DT_DTC_CTL); + + perf_pmu_unregister(&cmn->pmu); +- cpuhp_state_remove_instance(arm_cmn_hp_state, &cmn->cpuhp_node); ++ cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node); + return 0; + } + +-- +2.25.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0019-perf-arm-cmn-Account-for-NUMA-affinity.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0019-perf-arm-cmn-Account-for-NUMA-affinity.patch new file mode 100644 index 0000000000..f93bff7b43 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0019-perf-arm-cmn-Account-for-NUMA-affinity.patch @@ -0,0 +1,107 @@ +From c4b023618252ad8c03b7ae2cc411718af285bf66 Mon Sep 17 00:00:00 2001 +From: Robin Murphy <robin.murphy@arm.com> +Date: Fri, 3 Dec 2021 11:44:51 +0000 +Subject: [PATCH 03/14] perf/arm-cmn: Account for NUMA affinity + +On a system with multiple CMN meshes, ideally we'd want to access each +PMU from within its own mesh, rather 
than with a long CML round-trip, +wherever feasible. Since such a system is likely to be presented as +multiple NUMA nodes, let's also hope a proximity domain is specified +for each CMN programming interface, and use that to guide our choice +of IRQ affinity to favour a node-local CPU where possible. + +Signed-off-by: Robin Murphy <robin.murphy@arm.com> +Link: https://lore.kernel.org/r/32438b0d016e0649d882d47d30ac2000484287b9.1638530442.git.robin.murphy@arm.com +Signed-off-by: Will Deacon <will@kernel.org> + +Upstream-Status: Backport [https://lore.kernel.org/r/32438b0d016e0649d882d47d30ac2000484287b9.1638530442.git.robin.murphy@arm.com] +Signed-off-by: Rupinderjit Singh <rupinderjit.singh@arm.com> +--- + drivers/perf/arm-cmn.c | 51 +++++++++++++++++++++++++++++++----------- + 1 file changed, 38 insertions(+), 13 deletions(-) + +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c +index fe7f3e945481..2146d1c0103f 100644 +--- a/drivers/perf/arm-cmn.c ++++ b/drivers/perf/arm-cmn.c +@@ -1147,23 +1147,47 @@ static int arm_cmn_commit_txn(struct pmu *pmu) + return 0; + } + +-static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) ++static void arm_cmn_migrate(struct arm_cmn *cmn, unsigned int cpu) ++{ ++ unsigned int i; ++ ++ perf_pmu_migrate_context(&cmn->pmu, cmn->cpu, cpu); ++ for (i = 0; i < cmn->num_dtcs; i++) ++ irq_set_affinity(cmn->dtc[i].irq, cpumask_of(cpu)); ++ cmn->cpu = cpu; ++} ++ ++static int arm_cmn_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node) + { + struct arm_cmn *cmn; +- unsigned int i, target; ++ int node; + +- cmn = hlist_entry_safe(node, struct arm_cmn, cpuhp_node); +- if (cpu != cmn->cpu) +- return 0; ++ cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node); ++ node = dev_to_node(cmn->dev); ++ if (node != NUMA_NO_NODE && cpu_to_node(cmn->cpu) != node && cpu_to_node(cpu) == node) ++ arm_cmn_migrate(cmn, cpu); ++ return 0; ++} ++ ++static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct 
hlist_node *cpuhp_node) ++{ ++ struct arm_cmn *cmn; ++ unsigned int target; ++ int node; ++ cpumask_t mask; + +- target = cpumask_any_but(cpu_online_mask, cpu); +- if (target >= nr_cpu_ids) ++ cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node); ++ if (cpu != cmn->cpu) + return 0; + +- perf_pmu_migrate_context(&cmn->pmu, cpu, target); +- for (i = 0; i < cmn->num_dtcs; i++) +- irq_set_affinity(cmn->dtc[i].irq, cpumask_of(target)); +- cmn->cpu = target; ++ node = dev_to_node(cmn->dev); ++ if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) && ++ cpumask_andnot(&mask, &mask, cpumask_of(cpu))) ++ target = cpumask_any(&mask); ++ else ++ target = cpumask_any_but(cpu_online_mask, cpu); ++ if (target < nr_cpu_ids) ++ arm_cmn_migrate(cmn, target); + return 0; + } + +@@ -1532,7 +1556,7 @@ static int arm_cmn_probe(struct platform_device *pdev) + if (err) + return err; + +- cmn->cpu = raw_smp_processor_id(); ++ cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev)); + cmn->pmu = (struct pmu) { + .module = THIS_MODULE, + .attr_groups = arm_cmn_attr_groups, +@@ -1608,7 +1632,8 @@ static int __init arm_cmn_init(void) + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, +- "perf/arm/cmn:online", NULL, ++ "perf/arm/cmn:online", ++ arm_cmn_pmu_online_cpu, + arm_cmn_pmu_offline_cpu); + if (ret < 0) + return ret; +-- +2.25.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0020-perf-arm-cmn-Drop-compile-test-restriction.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0020-perf-arm-cmn-Drop-compile-test-restriction.patch new file mode 100644 index 0000000000..1a3e2d9792 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0020-perf-arm-cmn-Drop-compile-test-restriction.patch @@ -0,0 +1,89 @@ +From c3b11ad7a7e3e154a17f36c6768deab9227e28de Mon Sep 17 00:00:00 2001 +From: Robin Murphy <robin.murphy@arm.com> +Date: Fri, 3 Dec 2021 11:44:52 +0000 +Subject: 
[PATCH 04/14] perf/arm-cmn: Drop compile-test restriction + +Although CMN is currently (and overwhelmingly likely to remain) deployed +in arm64-only (modulo userspace) systems, the 64-bit "dependency" for +compile-testing was just laziness due to heavy reliance on readq/writeq +accessors. Since we only need one extra include for robustness in that +regard, let's pull that in, widen the compile-test coverage, and fix up +the smattering of type laziness that that brings to light. + +Signed-off-by: Robin Murphy <robin.murphy@arm.com> +Link: https://lore.kernel.org/r/baee9ee0d0bdad8aaeb70f5a4b98d8fd4b1f5786.1638530442.git.robin.murphy@arm.com +Signed-off-by: Will Deacon <will@kernel.org> + +Upstream-Status: Backport [https://lore.kernel.org/r/baee9ee0d0bdad8aaeb70f5a4b98d8fd4b1f5786.1638530442.git.robin.murphy@arm.com] +Signed-off-by: Rupinderjit Singh <rupinderjit.singh@arm.com> +--- + drivers/perf/Kconfig | 2 +- + drivers/perf/arm-cmn.c | 25 +++++++++++++------------ + 2 files changed, 14 insertions(+), 13 deletions(-) + +diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig +index 130327ff0b0e..828a042d6a07 100644 +--- a/drivers/perf/Kconfig ++++ b/drivers/perf/Kconfig +@@ -43,7 +43,7 @@ config ARM_CCN + + config ARM_CMN + tristate "Arm CMN-600 PMU support" +- depends on ARM64 || (COMPILE_TEST && 64BIT) ++ depends on ARM64 || COMPILE_TEST + help + Support for PMU events monitoring on the Arm CMN-600 Coherent Mesh + Network interconnect. 
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c +index 2146d1c0103f..e9af79b5f3de 100644 +--- a/drivers/perf/arm-cmn.c ++++ b/drivers/perf/arm-cmn.c +@@ -7,6 +7,7 @@ + #include <linux/bitops.h> + #include <linux/interrupt.h> + #include <linux/io.h> ++#include <linux/io-64-nonatomic-lo-hi.h> + #include <linux/kernel.h> + #include <linux/list.h> + #include <linux/module.h> +@@ -122,11 +123,11 @@ + + + /* Event attributes */ +-#define CMN_CONFIG_TYPE GENMASK(15, 0) +-#define CMN_CONFIG_EVENTID GENMASK(23, 16) +-#define CMN_CONFIG_OCCUPID GENMASK(27, 24) +-#define CMN_CONFIG_BYNODEID BIT(31) +-#define CMN_CONFIG_NODEID GENMASK(47, 32) ++#define CMN_CONFIG_TYPE GENMASK_ULL(15, 0) ++#define CMN_CONFIG_EVENTID GENMASK_ULL(23, 16) ++#define CMN_CONFIG_OCCUPID GENMASK_ULL(27, 24) ++#define CMN_CONFIG_BYNODEID BIT_ULL(31) ++#define CMN_CONFIG_NODEID GENMASK_ULL(47, 32) + + #define CMN_EVENT_TYPE(event) FIELD_GET(CMN_CONFIG_TYPE, (event)->attr.config) + #define CMN_EVENT_EVENTID(event) FIELD_GET(CMN_CONFIG_EVENTID, (event)->attr.config) +@@ -134,13 +135,13 @@ + #define CMN_EVENT_BYNODEID(event) FIELD_GET(CMN_CONFIG_BYNODEID, (event)->attr.config) + #define CMN_EVENT_NODEID(event) FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config) + +-#define CMN_CONFIG_WP_COMBINE GENMASK(27, 24) +-#define CMN_CONFIG_WP_DEV_SEL BIT(48) +-#define CMN_CONFIG_WP_CHN_SEL GENMASK(50, 49) +-#define CMN_CONFIG_WP_GRP BIT(52) +-#define CMN_CONFIG_WP_EXCLUSIVE BIT(53) +-#define CMN_CONFIG1_WP_VAL GENMASK(63, 0) +-#define CMN_CONFIG2_WP_MASK GENMASK(63, 0) ++#define CMN_CONFIG_WP_COMBINE GENMASK_ULL(27, 24) ++#define CMN_CONFIG_WP_DEV_SEL BIT_ULL(48) ++#define CMN_CONFIG_WP_CHN_SEL GENMASK_ULL(50, 49) ++#define CMN_CONFIG_WP_GRP BIT_ULL(52) ++#define CMN_CONFIG_WP_EXCLUSIVE BIT_ULL(53) ++#define CMN_CONFIG1_WP_VAL GENMASK_ULL(63, 0) ++#define CMN_CONFIG2_WP_MASK GENMASK_ULL(63, 0) + + #define CMN_EVENT_WP_COMBINE(event) FIELD_GET(CMN_CONFIG_WP_COMBINE, (event)->attr.config) + #define 
CMN_EVENT_WP_DEV_SEL(event) FIELD_GET(CMN_CONFIG_WP_DEV_SEL, (event)->attr.config) +-- +2.25.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0021-perf-arm-cmn-Refactor-node-ID-handling.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0021-perf-arm-cmn-Refactor-node-ID-handling.patch new file mode 100644 index 0000000000..ce4c2e5354 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0021-perf-arm-cmn-Refactor-node-ID-handling.patch @@ -0,0 +1,155 @@ +From 4f4a4cd7c79396fa72870ff712d15e82ebff80cf Mon Sep 17 00:00:00 2001 +From: Robin Murphy <robin.murphy@arm.com> +Date: Fri, 3 Dec 2021 11:44:53 +0000 +Subject: [PATCH 05/14] perf/arm-cmn: Refactor node ID handling + +Add a bit more abstraction for the places where we decompose node IDs. +This will help keep things nice and manageable when we come to add yet +more variables which affect the node ID format. Also use the opportunity +to move the rest of the low-level node management helpers back up to the +logical place they were meant to be - how they ended up buried right in +the middle of the event-related definitions is somewhat of a mystery... 
+ +Signed-off-by: Robin Murphy <robin.murphy@arm.com> +Link: https://lore.kernel.org/r/a2242a8c3c96056c13a04ae87bf2047e5e64d2d9.1638530442.git.robin.murphy@arm.com +Signed-off-by: Will Deacon <will@kernel.org> + +Upstream-Status: Backport [https://lore.kernel.org/r/a2242a8c3c96056c13a04ae87bf2047e5e64d2d9.1638530442.git.robin.murphy@arm.com] +Signed-off-by: Rupinderjit Singh <rupinderjit.singh@arm.com> +--- + drivers/perf/arm-cmn.c | 94 +++++++++++++++++++++++++----------------- + 1 file changed, 56 insertions(+), 38 deletions(-) + +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c +index e9af79b5f3de..cee301fe0f7e 100644 +--- a/drivers/perf/arm-cmn.c ++++ b/drivers/perf/arm-cmn.c +@@ -255,6 +255,58 @@ struct arm_cmn { + + static int arm_cmn_hp_state; + ++struct arm_cmn_nodeid { ++ u8 x; ++ u8 y; ++ u8 port; ++ u8 dev; ++}; ++ ++static int arm_cmn_xyidbits(const struct arm_cmn *cmn) ++{ ++ int dim = max(cmn->mesh_x, cmn->mesh_y); ++ ++ return dim > 4 ? 3 : 2; ++} ++ ++static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id) ++{ ++ struct arm_cmn_nodeid nid; ++ int bits = arm_cmn_xyidbits(cmn); ++ ++ nid.x = CMN_NODEID_X(id, bits); ++ nid.y = CMN_NODEID_Y(id, bits); ++ nid.port = CMN_NODEID_PID(id); ++ nid.dev = CMN_NODEID_DEVID(id); ++ ++ return nid; ++} ++ ++static void arm_cmn_init_node_to_xp(const struct arm_cmn *cmn, ++ struct arm_cmn_node *dn) ++{ ++ struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); ++ int xp_idx = cmn->mesh_x * nid.y + nid.x; ++ ++ dn->to_xp = (cmn->xps + xp_idx) - dn; ++} ++ ++static struct arm_cmn_node *arm_cmn_node_to_xp(struct arm_cmn_node *dn) ++{ ++ return dn->type == CMN_TYPE_XP ? 
dn : dn + dn->to_xp; ++} ++ ++static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn, ++ enum cmn_node_type type) ++{ ++ int i; ++ ++ for (i = 0; i < cmn->num_dns; i++) ++ if (cmn->dns[i].type == type) ++ return &cmn->dns[i]; ++ return NULL; ++} ++ + struct arm_cmn_hw_event { + struct arm_cmn_node *dn; + u64 dtm_idx[2]; +@@ -295,38 +347,6 @@ struct arm_cmn_format_attr { + int config; + }; + +-static int arm_cmn_xyidbits(const struct arm_cmn *cmn) +-{ +- return cmn->mesh_x > 4 || cmn->mesh_y > 4 ? 3 : 2; +-} +- +-static void arm_cmn_init_node_to_xp(const struct arm_cmn *cmn, +- struct arm_cmn_node *dn) +-{ +- int bits = arm_cmn_xyidbits(cmn); +- int x = CMN_NODEID_X(dn->id, bits); +- int y = CMN_NODEID_Y(dn->id, bits); +- int xp_idx = cmn->mesh_x * y + x; +- +- dn->to_xp = (cmn->xps + xp_idx) - dn; +-} +- +-static struct arm_cmn_node *arm_cmn_node_to_xp(struct arm_cmn_node *dn) +-{ +- return dn->type == CMN_TYPE_XP ? dn : dn + dn->to_xp; +-} +- +-static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn, +- enum cmn_node_type type) +-{ +- int i; +- +- for (i = 0; i < cmn->num_dns; i++) +- if (cmn->dns[i].type == type) +- return &cmn->dns[i]; +- return NULL; +-} +- + #define CMN_EVENT_ATTR(_name, _type, _eventid, _occupid) \ + (&((struct arm_cmn_event_attr[]) {{ \ + .attr = __ATTR(_name, 0444, arm_cmn_event_show, NULL), \ +@@ -966,11 +986,10 @@ static int arm_cmn_event_init(struct perf_event *event) + } + + if (!hw->num_dns) { +- int bits = arm_cmn_xyidbits(cmn); ++ struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, nodeid); + + dev_dbg(cmn->dev, "invalid node 0x%x (%d,%d,%d,%d) type 0x%x\n", +- nodeid, CMN_NODEID_X(nodeid, bits), CMN_NODEID_Y(nodeid, bits), +- CMN_NODEID_PID(nodeid), CMN_NODEID_DEVID(nodeid), type); ++ nodeid, nid.x, nid.y, nid.port, nid.dev, type); + return -EINVAL; + } + /* +@@ -1068,11 +1087,10 @@ static int arm_cmn_event_add(struct perf_event *event, int flags) + dn->wp_event[wp_idx] = dtc_idx; + writel_relaxed(cfg, dn->pmu_base + 
CMN_DTM_WPn_CONFIG(wp_idx)); + } else { +- unsigned int port = CMN_NODEID_PID(dn->id); +- unsigned int dev = CMN_NODEID_DEVID(dn->id); ++ struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); + + input_sel = CMN__PMEVCNT0_INPUT_SEL_DEV + dtm_idx + +- (port << 4) + (dev << 2); ++ (nid.port << 4) + (nid.dev << 2); + + if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event))) { + int occupid = CMN_EVENT_OCCUPID(event); +-- +2.25.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0022-perf-arm-cmn-Streamline-node-iteration.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0022-perf-arm-cmn-Streamline-node-iteration.patch new file mode 100644 index 0000000000..fbe49b2ff8 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0022-perf-arm-cmn-Streamline-node-iteration.patch @@ -0,0 +1,118 @@ +From 1b8e1ce0ebaa02c4cb7fa615b28c6905b0884e41 Mon Sep 17 00:00:00 2001 +From: Robin Murphy <robin.murphy@arm.com> +Date: Fri, 3 Dec 2021 11:44:54 +0000 +Subject: [PATCH 06/14] perf/arm-cmn: Streamline node iteration + +Refactor the places where we scan through the set of nodes to switch +from explicit array indexing to pointer-based iteration. This leads to +slightly simpler object code, but also makes the source less dense and +more pleasant for further development. It also unearths an almost-bug +in arm_cmn_event_init() where we've been depending on the "array index" +of NULL relative to cmn->dns being a sufficiently large number, yuck. 
+ +Signed-off-by: Robin Murphy <robin.murphy@arm.com> +Link: https://lore.kernel.org/r/ee0c9eda9a643f46001ac43aadf3f0b1fd5660dd.1638530442.git.robin.murphy@arm.com +Signed-off-by: Will Deacon <will@kernel.org> + +Upstream-Status: Backport [https://lore.kernel.org/r/ee0c9eda9a643f46001ac43aadf3f0b1fd5660dd.1638530442.git.robin.murphy@arm.com] +Signed-off-by: Rupinderjit Singh <rupinderjit.singh@arm.com> +--- + drivers/perf/arm-cmn.c | 33 ++++++++++++++++++++------------- + 1 file changed, 20 insertions(+), 13 deletions(-) + +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c +index cee301fe0f7e..77ebed7fae08 100644 +--- a/drivers/perf/arm-cmn.c ++++ b/drivers/perf/arm-cmn.c +@@ -299,11 +299,11 @@ static struct arm_cmn_node *arm_cmn_node_to_xp(struct arm_cmn_node *dn) + static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn, + enum cmn_node_type type) + { +- int i; ++ struct arm_cmn_node *dn; + +- for (i = 0; i < cmn->num_dns; i++) +- if (cmn->dns[i].type == type) +- return &cmn->dns[i]; ++ for (dn = cmn->dns; dn->type; dn++) ++ if (dn->type == type) ++ return dn; + return NULL; + } + +@@ -941,8 +941,8 @@ static int arm_cmn_event_init(struct perf_event *event) + { + struct arm_cmn *cmn = to_cmn(event->pmu); + struct arm_cmn_hw_event *hw = to_cmn_hw(event); ++ struct arm_cmn_node *dn; + enum cmn_node_type type; +- unsigned int i; + bool bynodeid; + u16 nodeid, eventid; + +@@ -974,10 +974,12 @@ static int arm_cmn_event_init(struct perf_event *event) + nodeid = CMN_EVENT_NODEID(event); + + hw->dn = arm_cmn_node(cmn, type); +- for (i = hw->dn - cmn->dns; i < cmn->num_dns && cmn->dns[i].type == type; i++) { ++ if (!hw->dn) ++ return -EINVAL; ++ for (dn = hw->dn; dn->type == type; dn++) { + if (!bynodeid) { + hw->num_dns++; +- } else if (cmn->dns[i].id != nodeid) { ++ } else if (dn->id != nodeid) { + hw->dn++; + } else { + hw->num_dns = 1; +@@ -1332,7 +1334,7 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn) + + cmn->xps = arm_cmn_node(cmn, 
CMN_TYPE_XP); + +- for (dn = cmn->dns; dn < cmn->dns + cmn->num_dns; dn++) { ++ for (dn = cmn->dns; dn->type; dn++) { + if (dn->type != CMN_TYPE_XP) + arm_cmn_init_node_to_xp(cmn, dn); + else if (cmn->num_dtcs == 1) +@@ -1382,6 +1384,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) + u32 xp_offset[CMN_MAX_XPS]; + u64 reg; + int i, j; ++ size_t sz; + + cfg_region = cmn->base + rgn_offset; + reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_2); +@@ -1408,14 +1411,13 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) + cmn->num_dns += FIELD_GET(CMN_CI_CHILD_COUNT, reg); + } + +- /* Cheeky +1 to help terminate pointer-based iteration */ +- cmn->dns = devm_kcalloc(cmn->dev, cmn->num_dns + 1, +- sizeof(*cmn->dns), GFP_KERNEL); +- if (!cmn->dns) ++ /* Cheeky +1 to help terminate pointer-based iteration later */ ++ dn = devm_kcalloc(cmn->dev, cmn->num_dns + 1, sizeof(*dn), GFP_KERNEL); ++ if (!dn) + return -ENOMEM; + + /* Pass 2: now we can actually populate the nodes */ +- dn = cmn->dns; ++ cmn->dns = dn; + for (i = 0; i < cmn->num_xps; i++) { + void __iomem *xp_region = cmn->base + xp_offset[i]; + struct arm_cmn_node *xp = dn++; +@@ -1484,6 +1486,11 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) + /* Correct for any nodes we skipped */ + cmn->num_dns = dn - cmn->dns; + ++ sz = (void *)(dn + 1) - (void *)cmn->dns; ++ dn = devm_krealloc(cmn->dev, cmn->dns, sz, GFP_KERNEL); ++ if (dn) ++ cmn->dns = dn; ++ + /* + * If mesh_x wasn't set during discovery then we never saw + * an XP at (0,1), thus we must have an Nx1 configuration. 
+-- +2.25.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0023-drivers-perf-arm-cmn-Add-space-after.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0023-drivers-perf-arm-cmn-Add-space-after.patch new file mode 100644 index 0000000000..3b11192155 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0023-drivers-perf-arm-cmn-Add-space-after.patch @@ -0,0 +1,34 @@ +From c3e137a2231f434f623593b6951c7575d22e1cdb Mon Sep 17 00:00:00 2001 +From: Junhao He <hejunhao2@hisilicon.com> +Date: Tue, 11 May 2021 20:27:33 +0800 +Subject: [PATCH 07/14] drivers/perf: arm-cmn: Add space after ',' + +Fix a warning from checkpatch.pl. + +ERROR: space required after that ',' (ctx:VxV) + +Signed-off-by: Junhao He <hejunhao2@hisilicon.com> +Signed-off-by: Jay Fang <f.fangjian@huawei.com> + +Upstream-Status: Backport [https://lore.kernel.org/all/1620736054-58412-4-git-send-email-f.fangjian@huawei.com] +Signed-off-by: Rupinderjit Singh <rupinderjit.singh@arm.com> +--- + drivers/perf/arm-cmn.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c +index 77ebed7fae08..e9f27f7776a2 100644 +--- a/drivers/perf/arm-cmn.c ++++ b/drivers/perf/arm-cmn.c +@@ -32,7 +32,7 @@ + #define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0) + #define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16) + +-#define CMN_CHILD_NODE_ADDR GENMASK(27,0) ++#define CMN_CHILD_NODE_ADDR GENMASK(27, 0) + #define CMN_CHILD_NODE_EXTERNAL BIT(31) + + #define CMN_ADDR_NODE_PTR GENMASK(27, 14) +-- +2.25.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0024-perf-arm-cmn-Refactor-DTM-handling.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0024-perf-arm-cmn-Refactor-DTM-handling.patch new file mode 100644 index 0000000000..6a68a79925 --- /dev/null +++ 
b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0024-perf-arm-cmn-Refactor-DTM-handling.patch @@ -0,0 +1,406 @@ +From 79bbc3eeee54b2e039dd4822e4a643e71adfbb04 Mon Sep 17 00:00:00 2001 +From: Robin Murphy <robin.murphy@arm.com> +Date: Fri, 3 Dec 2021 11:44:55 +0000 +Subject: [PATCH 08/14] perf/arm-cmn: Refactor DTM handling + +Untangle DTMs from XPs into a dedicated abstraction. This helps make +things a little more obvious and robust, but primarily paves the way +for further development where new IPs can grow extra DTMs per XP. + +Signed-off-by: Robin Murphy <robin.murphy@arm.com> +Link: https://lore.kernel.org/r/9cca18b1b98f482df7f1aaf3d3213e7f39500423.1638530442.git.robin.murphy@arm.com +Signed-off-by: Will Deacon <will@kernel.org> + +Upstream-Status: Backport [https://lore.kernel.org/r/9cca18b1b98f482df7f1aaf3d3213e7f39500423.1638530442.git.robin.murphy@arm.com] +Signed-off-by: Rupinderjit Singh <rupinderjit.singh@arm.com> +--- + drivers/perf/arm-cmn.c | 169 +++++++++++++++++++++-------------------- + 1 file changed, 87 insertions(+), 82 deletions(-) + +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c +index e9f27f7776a2..2ae3e92690a7 100644 +--- a/drivers/perf/arm-cmn.c ++++ b/drivers/perf/arm-cmn.c +@@ -35,14 +35,9 @@ + #define CMN_CHILD_NODE_ADDR GENMASK(27, 0) + #define CMN_CHILD_NODE_EXTERNAL BIT(31) + +-#define CMN_ADDR_NODE_PTR GENMASK(27, 14) +- +-#define CMN_NODE_PTR_DEVID(ptr) (((ptr) >> 2) & 3) +-#define CMN_NODE_PTR_PID(ptr) ((ptr) & 1) +-#define CMN_NODE_PTR_X(ptr, bits) ((ptr) >> (6 + (bits))) +-#define CMN_NODE_PTR_Y(ptr, bits) (((ptr) >> 6) & ((1U << (bits)) - 1)) +- +-#define CMN_MAX_XPS (8 * 8) ++#define CMN_MAX_DIMENSION 8 ++#define CMN_MAX_XPS (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION) ++#define CMN_MAX_DTMS CMN_MAX_XPS + + /* The CFG node has one other useful purpose */ + #define CMN_CFGM_PERIPH_ID_2 0x0010 +@@ -190,32 +185,32 @@ struct arm_cmn_node { + u16 id, logid; + enum cmn_node_type type; + ++ int dtm; + 
union { +- /* Device node */ ++ /* DN/HN-F/CXHA */ + struct { +- int to_xp; +- /* DN/HN-F/CXHA */ +- unsigned int occupid_val; +- unsigned int occupid_count; ++ u8 occupid_val; ++ u8 occupid_count; + }; + /* XP */ +- struct { +- int dtc; +- u32 pmu_config_low; +- union { +- u8 input_sel[4]; +- __le32 pmu_config_high; +- }; +- s8 wp_event[4]; +- }; ++ int dtc; + }; +- + union { + u8 event[4]; + __le32 event_sel; + }; + }; + ++struct arm_cmn_dtm { ++ void __iomem *base; ++ u32 pmu_config_low; ++ union { ++ u8 input_sel[4]; ++ __le32 pmu_config_high; ++ }; ++ s8 wp_event[4]; ++}; ++ + struct arm_cmn_dtc { + void __iomem *base; + int irq; +@@ -241,6 +236,7 @@ struct arm_cmn { + struct arm_cmn_node *xps; + struct arm_cmn_node *dns; + ++ struct arm_cmn_dtm *dtms; + struct arm_cmn_dtc *dtc; + unsigned int num_dtcs; + +@@ -282,20 +278,14 @@ static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id) + return nid; + } + +-static void arm_cmn_init_node_to_xp(const struct arm_cmn *cmn, +- struct arm_cmn_node *dn) ++static struct arm_cmn_node *arm_cmn_node_to_xp(const struct arm_cmn *cmn, ++ const struct arm_cmn_node *dn) + { + struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); + int xp_idx = cmn->mesh_x * nid.y + nid.x; + +- dn->to_xp = (cmn->xps + xp_idx) - dn; +-} +- +-static struct arm_cmn_node *arm_cmn_node_to_xp(struct arm_cmn_node *dn) +-{ +- return dn->type == CMN_TYPE_XP ? dn : dn + dn->to_xp; ++ return cmn->xps + xp_idx; + } +- + static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn, + enum cmn_node_type type) + { +@@ -706,9 +696,9 @@ static u64 arm_cmn_read_dtm(struct arm_cmn *cmn, struct arm_cmn_hw_event *hw, + + offset = snapshot ? 
CMN_DTM_PMEVCNTSR : CMN_DTM_PMEVCNT; + for_each_hw_dn(hw, dn, i) { +- struct arm_cmn_node *xp = arm_cmn_node_to_xp(dn); ++ struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm]; + int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); +- u64 reg = readq_relaxed(xp->pmu_base + offset); ++ u64 reg = readq_relaxed(dtm->base + offset); + u16 dtm_count = reg >> (dtm_idx * 16); + + count += dtm_count; +@@ -835,9 +825,9 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags) + } + + struct arm_cmn_val { +- u8 dtm_count[CMN_MAX_XPS]; +- u8 occupid[CMN_MAX_XPS]; +- u8 wp[CMN_MAX_XPS][4]; ++ u8 dtm_count[CMN_MAX_DTMS]; ++ u8 occupid[CMN_MAX_DTMS]; ++ u8 wp[CMN_MAX_DTMS][4]; + int dtc_count; + bool cycles; + }; +@@ -866,16 +856,16 @@ static void arm_cmn_val_add_event(struct arm_cmn_val *val, struct perf_event *ev + occupid = 0; + + for_each_hw_dn(hw, dn, i) { +- int wp_idx, xp = arm_cmn_node_to_xp(dn)->logid; ++ int wp_idx, dtm = dn->dtm; + +- val->dtm_count[xp]++; +- val->occupid[xp] = occupid; ++ val->dtm_count[dtm]++; ++ val->occupid[dtm] = occupid; + + if (type != CMN_TYPE_WP) + continue; + + wp_idx = arm_cmn_wp_idx(event); +- val->wp[xp][wp_idx] = CMN_EVENT_WP_COMBINE(event) + 1; ++ val->wp[dtm][wp_idx] = CMN_EVENT_WP_COMBINE(event) + 1; + } + } + +@@ -914,22 +904,22 @@ static int arm_cmn_validate_group(struct perf_event *event) + occupid = 0; + + for_each_hw_dn(hw, dn, i) { +- int wp_idx, wp_cmb, xp = arm_cmn_node_to_xp(dn)->logid; ++ int wp_idx, wp_cmb, dtm = dn->dtm; + +- if (val.dtm_count[xp] == CMN_DTM_NUM_COUNTERS) ++ if (val.dtm_count[dtm] == CMN_DTM_NUM_COUNTERS) + return -EINVAL; + +- if (occupid && val.occupid[xp] && occupid != val.occupid[xp]) ++ if (occupid && val.occupid[dtm] && occupid != val.occupid[dtm]) + return -EINVAL; + + if (type != CMN_TYPE_WP) + continue; + + wp_idx = arm_cmn_wp_idx(event); +- if (val.wp[xp][wp_idx]) ++ if (val.wp[dtm][wp_idx]) + return -EINVAL; + +- wp_cmb = val.wp[xp][wp_idx ^ 1]; ++ wp_cmb = val.wp[dtm][wp_idx ^ 1]; + if 
(wp_cmb && wp_cmb != CMN_EVENT_WP_COMBINE(event) + 1) + return -EINVAL; + } +@@ -1010,17 +1000,17 @@ static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event, + enum cmn_node_type type = CMN_EVENT_TYPE(event); + + while (i--) { +- struct arm_cmn_node *xp = arm_cmn_node_to_xp(hw->dn + i); ++ struct arm_cmn_dtm *dtm = &cmn->dtms[hw->dn[i].dtm]; + unsigned int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); + + if (type == CMN_TYPE_WP) +- hw->dn[i].wp_event[arm_cmn_wp_idx(event)] = -1; ++ dtm->wp_event[arm_cmn_wp_idx(event)] = -1; + + if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event))) + hw->dn[i].occupid_count--; + +- xp->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx); +- writel_relaxed(xp->pmu_config_low, xp->pmu_base + CMN_DTM_PMU_CONFIG); ++ dtm->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx); ++ writel_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG); + } + memset(hw->dtm_idx, 0, sizeof(hw->dtm_idx)); + +@@ -1062,12 +1052,12 @@ static int arm_cmn_event_add(struct perf_event *event, int flags) + + /* ...then the local counters to feed it. 
*/ + for_each_hw_dn(hw, dn, i) { +- struct arm_cmn_node *xp = arm_cmn_node_to_xp(dn); ++ struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm]; + unsigned int dtm_idx, shift; + u64 reg; + + dtm_idx = 0; +- while (xp->pmu_config_low & CMN__PMEVCNT_PAIRED(dtm_idx)) ++ while (dtm->pmu_config_low & CMN__PMEVCNT_PAIRED(dtm_idx)) + if (++dtm_idx == CMN_DTM_NUM_COUNTERS) + goto free_dtms; + +@@ -1077,17 +1067,17 @@ static int arm_cmn_event_add(struct perf_event *event, int flags) + int tmp, wp_idx = arm_cmn_wp_idx(event); + u32 cfg = arm_cmn_wp_config(event); + +- if (dn->wp_event[wp_idx] >= 0) ++ if (dtm->wp_event[wp_idx] >= 0) + goto free_dtms; + +- tmp = dn->wp_event[wp_idx ^ 1]; ++ tmp = dtm->wp_event[wp_idx ^ 1]; + if (tmp >= 0 && CMN_EVENT_WP_COMBINE(event) != + CMN_EVENT_WP_COMBINE(dtc->counters[tmp])) + goto free_dtms; + + input_sel = CMN__PMEVCNT0_INPUT_SEL_WP + wp_idx; +- dn->wp_event[wp_idx] = dtc_idx; +- writel_relaxed(cfg, dn->pmu_base + CMN_DTM_WPn_CONFIG(wp_idx)); ++ dtm->wp_event[wp_idx] = dtc_idx; ++ writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx)); + } else { + struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); + +@@ -1095,7 +1085,7 @@ static int arm_cmn_event_add(struct perf_event *event, int flags) + (nid.port << 4) + (nid.dev << 2); + + if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event))) { +- int occupid = CMN_EVENT_OCCUPID(event); ++ u8 occupid = CMN_EVENT_OCCUPID(event); + + if (dn->occupid_count == 0) { + dn->occupid_val = occupid; +@@ -1110,13 +1100,13 @@ static int arm_cmn_event_add(struct perf_event *event, int flags) + + arm_cmn_set_index(hw->dtm_idx, i, dtm_idx); + +- xp->input_sel[dtm_idx] = input_sel; ++ dtm->input_sel[dtm_idx] = input_sel; + shift = CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(dtm_idx); +- xp->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift); +- xp->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, dtc_idx) << shift; +- xp->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx); +- reg = 
(u64)le32_to_cpu(xp->pmu_config_high) << 32 | xp->pmu_config_low; +- writeq_relaxed(reg, xp->pmu_base + CMN_DTM_PMU_CONFIG); ++ dtm->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift); ++ dtm->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, dtc_idx) << shift; ++ dtm->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx); ++ reg = (u64)le32_to_cpu(dtm->pmu_config_high) << 32 | dtm->pmu_config_low; ++ writeq_relaxed(reg, dtm->base + CMN_DTM_PMU_CONFIG); + } + + /* Go go go! */ +@@ -1276,23 +1266,22 @@ static int arm_cmn_init_irqs(struct arm_cmn *cmn) + return 0; + } + +-static void arm_cmn_init_dtm(struct arm_cmn_node *xp) ++static void arm_cmn_init_dtm(struct arm_cmn_dtm *dtm, struct arm_cmn_node *xp) + { + int i; + ++ dtm->base = xp->pmu_base; ++ dtm->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN; + for (i = 0; i < 4; i++) { +- xp->wp_event[i] = -1; +- writeq_relaxed(0, xp->pmu_base + CMN_DTM_WPn_MASK(i)); +- writeq_relaxed(~0ULL, xp->pmu_base + CMN_DTM_WPn_VAL(i)); ++ dtm->wp_event[i] = -1; ++ writeq_relaxed(0, dtm->base + CMN_DTM_WPn_MASK(i)); ++ writeq_relaxed(~0ULL, dtm->base + CMN_DTM_WPn_VAL(i)); + } +- xp->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN; +- xp->dtc = -1; + } + + static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int idx) + { + struct arm_cmn_dtc *dtc = cmn->dtc + idx; +- struct arm_cmn_node *xp; + + dtc->base = dn->pmu_base - CMN_PMU_OFFSET; + dtc->irq = platform_get_irq(to_platform_device(cmn->dev), idx); +@@ -1303,10 +1292,6 @@ static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int id + writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR); + writel_relaxed(CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR); + +- /* We do at least know that a DTC's XP must be in that DTC's domain */ +- xp = arm_cmn_node_to_xp(dn); +- xp->dtc = idx; +- + return 0; + } + +@@ -1323,7 +1308,7 @@ static int arm_cmn_node_cmp(const void *a, const void *b) + + static int arm_cmn_init_dtcs(struct arm_cmn *cmn) + { +- struct 
arm_cmn_node *dn; ++ struct arm_cmn_node *dn, *xp; + int dtc_idx = 0; + + cmn->dtc = devm_kcalloc(cmn->dev, cmn->num_dtcs, sizeof(cmn->dtc[0]), GFP_KERNEL); +@@ -1335,13 +1320,24 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn) + cmn->xps = arm_cmn_node(cmn, CMN_TYPE_XP); + + for (dn = cmn->dns; dn->type; dn++) { +- if (dn->type != CMN_TYPE_XP) +- arm_cmn_init_node_to_xp(cmn, dn); +- else if (cmn->num_dtcs == 1) +- dn->dtc = 0; ++ if (dn->type == CMN_TYPE_XP) { ++ if (dn->dtc < 0 && cmn->num_dtcs == 1) ++ dn->dtc = 0; ++ continue; ++ } + +- if (dn->type == CMN_TYPE_DTC) +- arm_cmn_init_dtc(cmn, dn, dtc_idx++); ++ xp = arm_cmn_node_to_xp(cmn, dn); ++ dn->dtm = xp->dtm; ++ ++ if (dn->type == CMN_TYPE_DTC) { ++ int err; ++ /* We do at least know that a DTC's XP must be in that DTC's domain */ ++ if (xp->dtc < 0) ++ xp->dtc = dtc_idx; ++ err = arm_cmn_init_dtc(cmn, dn, dtc_idx++); ++ if (err) ++ return err; ++ } + + /* To the PMU, RN-Ds don't add anything over RN-Is, so smoosh them together */ + if (dn->type == CMN_TYPE_RND) +@@ -1380,6 +1376,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) + { + void __iomem *cfg_region; + struct arm_cmn_node cfg, *dn; ++ struct arm_cmn_dtm *dtm; + u16 child_count, child_poff; + u32 xp_offset[CMN_MAX_XPS]; + u64 reg; +@@ -1416,14 +1413,18 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) + if (!dn) + return -ENOMEM; + ++ dtm = devm_kcalloc(cmn->dev, cmn->num_xps, sizeof(*dtm), GFP_KERNEL); ++ if (!dtm) ++ return -ENOMEM; ++ + /* Pass 2: now we can actually populate the nodes */ + cmn->dns = dn; ++ cmn->dtms = dtm; + for (i = 0; i < cmn->num_xps; i++) { + void __iomem *xp_region = cmn->base + xp_offset[i]; + struct arm_cmn_node *xp = dn++; + + arm_cmn_init_node_info(cmn, xp_offset[i], xp); +- arm_cmn_init_dtm(xp); + /* + * Thanks to the order in which XP logical IDs seem to be + * assigned, we can handily infer the mesh X dimension by +@@ -1433,6 +1434,10 @@ static int 
arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) + if (xp->id == (1 << 3)) + cmn->mesh_x = xp->logid; + ++ xp->dtc = -1; ++ xp->dtm = dtm - cmn->dtms; ++ arm_cmn_init_dtm(dtm++, xp); ++ + reg = readq_relaxed(xp_region + CMN_CHILD_INFO); + child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg); + child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg); +-- +2.25.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0025-perf-arm-cmn-Optimise-DTM-counter-reads.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0025-perf-arm-cmn-Optimise-DTM-counter-reads.patch new file mode 100644 index 0000000000..af334686fb --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0025-perf-arm-cmn-Optimise-DTM-counter-reads.patch @@ -0,0 +1,56 @@ +From a63878c01597e21451c2b3f239cbf0a2fbdeeadf Mon Sep 17 00:00:00 2001 +From: Robin Murphy <robin.murphy@arm.com> +Date: Fri, 3 Dec 2021 11:44:56 +0000 +Subject: [PATCH 09/14] perf/arm-cmn: Optimise DTM counter reads + +When multiple nodes of the same type are connected to the same XP +(particularly in CAL configurations), it seems that they are likely +to be consecutive in logical ID. Therefore, we're likely to gain a +small benefit from an easy tweak to optimise out consecutive reads +of the same set of DTM counters for an aggregated event. 
+ +Signed-off-by: Robin Murphy <robin.murphy@arm.com> +Link: https://lore.kernel.org/r/7777d77c2df17693cd3dabb6e268906e15238d82.1638530442.git.robin.murphy@arm.com +Signed-off-by: Will Deacon <will@kernel.org> + +Upstream-Status: Backport [https://lore.kernel.org/r/7777d77c2df17693cd3dabb6e268906e15238d82.1638530442.git.robin.murphy@arm.com] +Signed-off-by: Rupinderjit Singh <rupinderjit.singh@arm.com> +--- + drivers/perf/arm-cmn.c | 17 +++++++++-------- + 1 file changed, 9 insertions(+), 8 deletions(-) + +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c +index 2ae3e92690a7..5fa31ebc1fce 100644 +--- a/drivers/perf/arm-cmn.c ++++ b/drivers/perf/arm-cmn.c +@@ -690,18 +690,19 @@ static void arm_cmn_pmu_disable(struct pmu *pmu) + static u64 arm_cmn_read_dtm(struct arm_cmn *cmn, struct arm_cmn_hw_event *hw, + bool snapshot) + { ++ struct arm_cmn_dtm *dtm = NULL; + struct arm_cmn_node *dn; +- unsigned int i, offset; +- u64 count = 0; ++ unsigned int i, offset, dtm_idx; ++ u64 reg, count = 0; + + offset = snapshot ? 
CMN_DTM_PMEVCNTSR : CMN_DTM_PMEVCNT; + for_each_hw_dn(hw, dn, i) { +- struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm]; +- int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); +- u64 reg = readq_relaxed(dtm->base + offset); +- u16 dtm_count = reg >> (dtm_idx * 16); +- +- count += dtm_count; ++ if (dtm != &cmn->dtms[dn->dtm]) { ++ dtm = &cmn->dtms[dn->dtm]; ++ reg = readq_relaxed(dtm->base + offset); ++ } ++ dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); ++ count += (u16)(reg >> (dtm_idx * 16)); + } + return count; + } +-- +2.25.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0026-perf-arm-cmn-Optimise-DTC-counter-accesses.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0026-perf-arm-cmn-Optimise-DTC-counter-accesses.patch new file mode 100644 index 0000000000..56334e8032 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0026-perf-arm-cmn-Optimise-DTC-counter-accesses.patch @@ -0,0 +1,111 @@ +From 782b7cd98e6a6b8c5fcd9e20f5c534617b1f04d3 Mon Sep 17 00:00:00 2001 +From: Robin Murphy <robin.murphy@arm.com> +Date: Fri, 3 Dec 2021 11:44:57 +0000 +Subject: [PATCH 10/14] perf/arm-cmn: Optimise DTC counter accesses + +In cases where we do know which DTC domain a node belongs to, we can +skip initialising or reading the global count in DTCs where we know +it won't change. The machinery to achieve that is mostly in place +already, so finish hooking it up by converting the vestigial domain +tracking to propagate suitable bitmaps all the way through to events. + +Note that this does not allow allocating such an unused counter to a +different event on that DTC, because that is a flippin' nightmare. 
+ +Signed-off-by: Robin Murphy <robin.murphy@arm.com> +Link: https://lore.kernel.org/r/51d930fd945ef51c81f5889ccca055c302b0a1d0.1638530442.git.robin.murphy@arm.com +Signed-off-by: Will Deacon <will@kernel.org> + +Upstream-Status: Backport [https://lore.kernel.org/r/51d930fd945ef51c81f5889ccca055c302b0a1d0.1638530442.git.robin.murphy@arm.com] +Signed-off-by: Rupinderjit Singh <rupinderjit.singh@arm.com> +--- + drivers/perf/arm-cmn.c | 29 ++++++++++++----------------- + 1 file changed, 12 insertions(+), 17 deletions(-) + +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c +index 5fa31ebc1fce..2204d6500814 100644 +--- a/drivers/perf/arm-cmn.c ++++ b/drivers/perf/arm-cmn.c +@@ -193,7 +193,7 @@ struct arm_cmn_node { + u8 occupid_count; + }; + /* XP */ +- int dtc; ++ u8 dtc; + }; + union { + u8 event[4]; +@@ -968,14 +968,14 @@ static int arm_cmn_event_init(struct perf_event *event) + if (!hw->dn) + return -EINVAL; + for (dn = hw->dn; dn->type == type; dn++) { +- if (!bynodeid) { +- hw->num_dns++; +- } else if (dn->id != nodeid) { ++ if (bynodeid && dn->id != nodeid) { + hw->dn++; +- } else { +- hw->num_dns = 1; +- break; ++ continue; + } ++ hw->dtcs_used |= arm_cmn_node_to_xp(cmn, dn)->dtc; ++ hw->num_dns++; ++ if (bynodeid) ++ break; + } + + if (!hw->num_dns) { +@@ -985,11 +985,6 @@ static int arm_cmn_event_init(struct perf_event *event) + nodeid, nid.x, nid.y, nid.port, nid.dev, type); + return -EINVAL; + } +- /* +- * By assuming events count in all DTC domains, we cunningly avoid +- * needing to know anything about how XPs are assigned to domains. 
+- */ +- hw->dtcs_used = (1U << cmn->num_dtcs) - 1; + + return arm_cmn_validate_group(event); + } +@@ -1311,6 +1306,7 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn) + { + struct arm_cmn_node *dn, *xp; + int dtc_idx = 0; ++ u8 dtcs_present = (1 << cmn->num_dtcs) - 1; + + cmn->dtc = devm_kcalloc(cmn->dev, cmn->num_dtcs, sizeof(cmn->dtc[0]), GFP_KERNEL); + if (!cmn->dtc) +@@ -1322,8 +1318,7 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn) + + for (dn = cmn->dns; dn->type; dn++) { + if (dn->type == CMN_TYPE_XP) { +- if (dn->dtc < 0 && cmn->num_dtcs == 1) +- dn->dtc = 0; ++ dn->dtc &= dtcs_present; + continue; + } + +@@ -1333,8 +1328,8 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn) + if (dn->type == CMN_TYPE_DTC) { + int err; + /* We do at least know that a DTC's XP must be in that DTC's domain */ +- if (xp->dtc < 0) +- xp->dtc = dtc_idx; ++ if (xp->dtc == 0xf) ++ xp->dtc = 1 << dtc_idx; + err = arm_cmn_init_dtc(cmn, dn, dtc_idx++); + if (err) + return err; +@@ -1435,7 +1430,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) + if (xp->id == (1 << 3)) + cmn->mesh_x = xp->logid; + +- xp->dtc = -1; ++ xp->dtc = 0xf; + xp->dtm = dtm - cmn->dtms; + arm_cmn_init_dtm(dtm++, xp); + +-- +2.25.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0027-perf-arm-cmn-Move-group-validation-data-off-stack.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0027-perf-arm-cmn-Move-group-validation-data-off-stack.patch new file mode 100644 index 0000000000..06adc56e6c --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0027-perf-arm-cmn-Move-group-validation-data-off-stack.patch @@ -0,0 +1,108 @@ +From d919e8bcbb790018e097cb8a01e7c840dcdb82aa Mon Sep 17 00:00:00 2001 +From: Robin Murphy <robin.murphy@arm.com> +Date: Fri, 3 Dec 2021 11:44:58 +0000 +Subject: [PATCH 11/14] perf/arm-cmn: Move group validation data off-stack + +With the value of CMN_MAX_DTMS 
increasing significantly, our validation +data structure is set to get quite big. Technically we could pack it at +least twice as densely, since we only need around 19 bits of information +per DTM, but that makes the code even more mind-bogglingly impenetrable, +and even half of "quite big" may still be uncomfortably large for a +stack frame (~1KB). Just move it to an off-stack allocation instead. + +Signed-off-by: Robin Murphy <robin.murphy@arm.com> +Link: https://lore.kernel.org/r/0cabff2e5839ddc0979e757c55515966f65359e4.1638530442.git.robin.murphy@arm.com +Signed-off-by: Will Deacon <will@kernel.org> + +Upstream-Status: Backport [https://lore.kernel.org/r/0cabff2e5839ddc0979e757c55515966f65359e4.1638530442.git.robin.murphy@arm.com] +Signed-off-by: Rupinderjit Singh <rupinderjit.singh@arm.com> +--- + drivers/perf/arm-cmn.c | 43 ++++++++++++++++++++++++------------------ + 1 file changed, 25 insertions(+), 18 deletions(-) + +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c +index 2204d6500814..b89a081d26ff 100644 +--- a/drivers/perf/arm-cmn.c ++++ b/drivers/perf/arm-cmn.c +@@ -876,8 +876,8 @@ static int arm_cmn_validate_group(struct perf_event *event) + struct arm_cmn_node *dn; + struct perf_event *sibling, *leader = event->group_leader; + enum cmn_node_type type; +- struct arm_cmn_val val; +- int i; ++ struct arm_cmn_val *val; ++ int i, ret = -EINVAL; + u8 occupid; + + if (leader == event) +@@ -886,18 +886,22 @@ static int arm_cmn_validate_group(struct perf_event *event) + if (event->pmu != leader->pmu && !is_software_event(leader)) + return -EINVAL; + +- memset(&val, 0, sizeof(val)); ++ val = kzalloc(sizeof(*val), GFP_KERNEL); ++ if (!val) ++ return -ENOMEM; + +- arm_cmn_val_add_event(&val, leader); ++ arm_cmn_val_add_event(val, leader); + for_each_sibling_event(sibling, leader) +- arm_cmn_val_add_event(&val, sibling); ++ arm_cmn_val_add_event(val, sibling); + + type = CMN_EVENT_TYPE(event); +- if (type == CMN_TYPE_DTC) +- return val.cycles ? 
-EINVAL : 0; ++ if (type == CMN_TYPE_DTC) { ++ ret = val->cycles ? -EINVAL : 0; ++ goto done; ++ } + +- if (val.dtc_count == CMN_DT_NUM_COUNTERS) +- return -EINVAL; ++ if (val->dtc_count == CMN_DT_NUM_COUNTERS) ++ goto done; + + if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event))) + occupid = CMN_EVENT_OCCUPID(event) + 1; +@@ -907,25 +911,28 @@ static int arm_cmn_validate_group(struct perf_event *event) + for_each_hw_dn(hw, dn, i) { + int wp_idx, wp_cmb, dtm = dn->dtm; + +- if (val.dtm_count[dtm] == CMN_DTM_NUM_COUNTERS) +- return -EINVAL; ++ if (val->dtm_count[dtm] == CMN_DTM_NUM_COUNTERS) ++ goto done; + +- if (occupid && val.occupid[dtm] && occupid != val.occupid[dtm]) +- return -EINVAL; ++ if (occupid && val->occupid[dtm] && occupid != val->occupid[dtm]) ++ goto done; + + if (type != CMN_TYPE_WP) + continue; + + wp_idx = arm_cmn_wp_idx(event); +- if (val.wp[dtm][wp_idx]) +- return -EINVAL; ++ if (val->wp[dtm][wp_idx]) ++ goto done; + +- wp_cmb = val.wp[dtm][wp_idx ^ 1]; ++ wp_cmb = val->wp[dtm][wp_idx ^ 1]; + if (wp_cmb && wp_cmb != CMN_EVENT_WP_COMBINE(event) + 1) +- return -EINVAL; ++ goto done; + } + +- return 0; ++ ret = 0; ++done: ++ kfree(val); ++ return ret; + } + + static int arm_cmn_event_init(struct perf_event *event) +-- +2.25.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0028-perf-arm-cmn-Demarcate-CMN-600-specifics.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0028-perf-arm-cmn-Demarcate-CMN-600-specifics.patch new file mode 100644 index 0000000000..0fbc5cbba4 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0028-perf-arm-cmn-Demarcate-CMN-600-specifics.patch @@ -0,0 +1,466 @@ +From 7400784247be42beb996f7538547c56acd6cfa0c Mon Sep 17 00:00:00 2001 +From: Robin Murphy <robin.murphy@arm.com> +Date: Fri, 3 Dec 2021 11:44:59 +0000 +Subject: [PATCH 12/14] perf/arm-cmn: Demarcate CMN-600 specifics + +In preparation for supporting newer CMN 
products, let's introduce a +means to differentiate the features and events which are specific to a +particular IP from those which remain common to the whole family. The +newer designs have also smoothed off some of the rough edges in terms +of discoverability, so separate out the parts of the flow which have +effectively now become CMN-600 quirks. + +Signed-off-by: Robin Murphy <robin.murphy@arm.com> +Link: https://lore.kernel.org/r/9f6368cdca4c821d801138939508a5bba54ccabb.1638530442.git.robin.murphy@arm.com +Signed-off-by: Will Deacon <will@kernel.org> + +Upstream-Status: Backport [https://lore.kernel.org/r/9f6368cdca4c821d801138939508a5bba54ccabb.1638530442.git.robin.murphy@arm.com] +Signed-off-by: Rupinderjit Singh <rupinderjit.singh@arm.com> +--- + drivers/perf/arm-cmn.c | 313 +++++++++++++++++++++-------------------- + 1 file changed, 162 insertions(+), 151 deletions(-) + +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c +index b89a081d26ff..92ff273fbe58 100644 +--- a/drivers/perf/arm-cmn.c ++++ b/drivers/perf/arm-cmn.c +@@ -151,7 +151,12 @@ + #define CMN_WP_DOWN 2 + + +-/* r0px probably don't exist in silicon, thankfully */ ++enum cmn_model { ++ CMN_ANY = -1, ++ CMN600 = 1, ++}; ++ ++/* CMN-600 r0px shouldn't exist in silicon, thankfully */ + enum cmn_revision { + CMN600_R1P0, + CMN600_R1P1, +@@ -159,6 +164,7 @@ enum cmn_revision { + CMN600_R1P3, + CMN600_R2P0, + CMN600_R3P0, ++ CMN600_R3P1, + }; + + enum cmn_node_type { +@@ -229,6 +235,7 @@ struct arm_cmn { + void __iomem *base; + + enum cmn_revision rev; ++ enum cmn_model model; + u8 mesh_x; + u8 mesh_y; + u16 num_xps; +@@ -326,6 +333,7 @@ static unsigned int arm_cmn_get_index(u64 x[], unsigned int pos) + + struct arm_cmn_event_attr { + struct device_attribute attr; ++ enum cmn_model model; + enum cmn_node_type type; + u8 eventid; + u8 occupid; +@@ -337,9 +345,10 @@ struct arm_cmn_format_attr { + int config; + }; + +-#define CMN_EVENT_ATTR(_name, _type, _eventid, _occupid) \ ++#define 
CMN_EVENT_ATTR(_model, _name, _type, _eventid, _occupid) \ + (&((struct arm_cmn_event_attr[]) {{ \ + .attr = __ATTR(_name, 0444, arm_cmn_event_show, NULL), \ ++ .model = _model, \ + .type = _type, \ + .eventid = _eventid, \ + .occupid = _occupid, \ +@@ -386,12 +395,15 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, + eattr = container_of(attr, typeof(*eattr), attr.attr); + type = eattr->type; + ++ if (!(eattr->model & cmn->model)) ++ return 0; ++ + /* Watchpoints aren't nodes */ + if (type == CMN_TYPE_WP) + type = CMN_TYPE_XP; + + /* Revision-specific differences */ +- if (cmn->rev < CMN600_R1P2) { ++ if (cmn->model == CMN600 && cmn->rev < CMN600_R1P2) { + if (type == CMN_TYPE_HNF && eattr->eventid == 0x1b) + return 0; + } +@@ -402,25 +414,27 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, + return attr->mode; + } + +-#define _CMN_EVENT_DVM(_name, _event, _occup) \ +- CMN_EVENT_ATTR(dn_##_name, CMN_TYPE_DVM, _event, _occup) ++#define _CMN_EVENT_DVM(_model, _name, _event, _occup) \ ++ CMN_EVENT_ATTR(_model, dn_##_name, CMN_TYPE_DVM, _event, _occup) + #define CMN_EVENT_DTC(_name) \ +- CMN_EVENT_ATTR(dtc_##_name, CMN_TYPE_DTC, 0, 0) +-#define _CMN_EVENT_HNF(_name, _event, _occup) \ +- CMN_EVENT_ATTR(hnf_##_name, CMN_TYPE_HNF, _event, _occup) ++ CMN_EVENT_ATTR(CMN_ANY, dtc_##_name, CMN_TYPE_DTC, 0, 0) ++#define _CMN_EVENT_HNF(_model, _name, _event, _occup) \ ++ CMN_EVENT_ATTR(_model, hnf_##_name, CMN_TYPE_HNF, _event, _occup) + #define CMN_EVENT_HNI(_name, _event) \ +- CMN_EVENT_ATTR(hni_##_name, CMN_TYPE_HNI, _event, 0) ++ CMN_EVENT_ATTR(CMN_ANY, hni_##_name, CMN_TYPE_HNI, _event, 0) + #define __CMN_EVENT_XP(_name, _event) \ +- CMN_EVENT_ATTR(mxp_##_name, CMN_TYPE_XP, _event, 0) +-#define CMN_EVENT_SBSX(_name, _event) \ +- CMN_EVENT_ATTR(sbsx_##_name, CMN_TYPE_SBSX, _event, 0) +-#define CMN_EVENT_RNID(_name, _event) \ +- CMN_EVENT_ATTR(rnid_##_name, CMN_TYPE_RNI, _event, 0) +- +-#define CMN_EVENT_DVM(_name, _event) \ +- 
_CMN_EVENT_DVM(_name, _event, 0) +-#define CMN_EVENT_HNF(_name, _event) \ +- _CMN_EVENT_HNF(_name, _event, 0) ++ CMN_EVENT_ATTR(CMN_ANY, mxp_##_name, CMN_TYPE_XP, _event, 0) ++#define CMN_EVENT_SBSX(_model, _name, _event) \ ++ CMN_EVENT_ATTR(_model, sbsx_##_name, CMN_TYPE_SBSX, _event, 0) ++#define CMN_EVENT_RNID(_model, _name, _event) \ ++ CMN_EVENT_ATTR(_model, rnid_##_name, CMN_TYPE_RNI, _event, 0) ++#define CMN_EVENT_MTSX(_name, _event) \ ++ CMN_EVENT_ATTR(CMN_ANY, mtsx_##_name, CMN_TYPE_MTSX, _event, 0) ++ ++#define CMN_EVENT_DVM(_model, _name, _event) \ ++ _CMN_EVENT_DVM(_model, _name, _event, 0) ++#define CMN_EVENT_HNF(_model, _name, _event) \ ++ _CMN_EVENT_HNF(_model, _name, _event, 0) + #define _CMN_EVENT_XP(_name, _event) \ + __CMN_EVENT_XP(e_##_name, (_event) | (0 << 2)), \ + __CMN_EVENT_XP(w_##_name, (_event) | (1 << 2)), \ +@@ -445,115 +459,115 @@ static struct attribute *arm_cmn_event_attrs[] = { + * slot, but our lazy short-cut of using the DTM counter index for + * the PMU index as well happens to avoid that by construction. 
+ */ +- CMN_EVENT_DVM(rxreq_dvmop, 0x01), +- CMN_EVENT_DVM(rxreq_dvmsync, 0x02), +- CMN_EVENT_DVM(rxreq_dvmop_vmid_filtered, 0x03), +- CMN_EVENT_DVM(rxreq_retried, 0x04), +- _CMN_EVENT_DVM(rxreq_trk_occupancy_all, 0x05, 0), +- _CMN_EVENT_DVM(rxreq_trk_occupancy_dvmop, 0x05, 1), +- _CMN_EVENT_DVM(rxreq_trk_occupancy_dvmsync, 0x05, 2), +- +- CMN_EVENT_HNF(cache_miss, 0x01), +- CMN_EVENT_HNF(slc_sf_cache_access, 0x02), +- CMN_EVENT_HNF(cache_fill, 0x03), +- CMN_EVENT_HNF(pocq_retry, 0x04), +- CMN_EVENT_HNF(pocq_reqs_recvd, 0x05), +- CMN_EVENT_HNF(sf_hit, 0x06), +- CMN_EVENT_HNF(sf_evictions, 0x07), +- CMN_EVENT_HNF(dir_snoops_sent, 0x08), +- CMN_EVENT_HNF(brd_snoops_sent, 0x09), +- CMN_EVENT_HNF(slc_eviction, 0x0a), +- CMN_EVENT_HNF(slc_fill_invalid_way, 0x0b), +- CMN_EVENT_HNF(mc_retries, 0x0c), +- CMN_EVENT_HNF(mc_reqs, 0x0d), +- CMN_EVENT_HNF(qos_hh_retry, 0x0e), +- _CMN_EVENT_HNF(qos_pocq_occupancy_all, 0x0f, 0), +- _CMN_EVENT_HNF(qos_pocq_occupancy_read, 0x0f, 1), +- _CMN_EVENT_HNF(qos_pocq_occupancy_write, 0x0f, 2), +- _CMN_EVENT_HNF(qos_pocq_occupancy_atomic, 0x0f, 3), +- _CMN_EVENT_HNF(qos_pocq_occupancy_stash, 0x0f, 4), +- CMN_EVENT_HNF(pocq_addrhaz, 0x10), +- CMN_EVENT_HNF(pocq_atomic_addrhaz, 0x11), +- CMN_EVENT_HNF(ld_st_swp_adq_full, 0x12), +- CMN_EVENT_HNF(cmp_adq_full, 0x13), +- CMN_EVENT_HNF(txdat_stall, 0x14), +- CMN_EVENT_HNF(txrsp_stall, 0x15), +- CMN_EVENT_HNF(seq_full, 0x16), +- CMN_EVENT_HNF(seq_hit, 0x17), +- CMN_EVENT_HNF(snp_sent, 0x18), +- CMN_EVENT_HNF(sfbi_dir_snp_sent, 0x19), +- CMN_EVENT_HNF(sfbi_brd_snp_sent, 0x1a), +- CMN_EVENT_HNF(snp_sent_untrk, 0x1b), +- CMN_EVENT_HNF(intv_dirty, 0x1c), +- CMN_EVENT_HNF(stash_snp_sent, 0x1d), +- CMN_EVENT_HNF(stash_data_pull, 0x1e), +- CMN_EVENT_HNF(snp_fwded, 0x1f), +- +- CMN_EVENT_HNI(rrt_rd_occ_cnt_ovfl, 0x20), +- CMN_EVENT_HNI(rrt_wr_occ_cnt_ovfl, 0x21), +- CMN_EVENT_HNI(rdt_rd_occ_cnt_ovfl, 0x22), +- CMN_EVENT_HNI(rdt_wr_occ_cnt_ovfl, 0x23), +- CMN_EVENT_HNI(wdb_occ_cnt_ovfl, 0x24), +- 
CMN_EVENT_HNI(rrt_rd_alloc, 0x25), +- CMN_EVENT_HNI(rrt_wr_alloc, 0x26), +- CMN_EVENT_HNI(rdt_rd_alloc, 0x27), +- CMN_EVENT_HNI(rdt_wr_alloc, 0x28), +- CMN_EVENT_HNI(wdb_alloc, 0x29), +- CMN_EVENT_HNI(txrsp_retryack, 0x2a), +- CMN_EVENT_HNI(arvalid_no_arready, 0x2b), +- CMN_EVENT_HNI(arready_no_arvalid, 0x2c), +- CMN_EVENT_HNI(awvalid_no_awready, 0x2d), +- CMN_EVENT_HNI(awready_no_awvalid, 0x2e), +- CMN_EVENT_HNI(wvalid_no_wready, 0x2f), +- CMN_EVENT_HNI(txdat_stall, 0x30), +- CMN_EVENT_HNI(nonpcie_serialization, 0x31), +- CMN_EVENT_HNI(pcie_serialization, 0x32), +- +- CMN_EVENT_XP(txflit_valid, 0x01), +- CMN_EVENT_XP(txflit_stall, 0x02), +- CMN_EVENT_XP(partial_dat_flit, 0x03), ++ CMN_EVENT_DVM(CMN600, rxreq_dvmop, 0x01), ++ CMN_EVENT_DVM(CMN600, rxreq_dvmsync, 0x02), ++ CMN_EVENT_DVM(CMN600, rxreq_dvmop_vmid_filtered, 0x03), ++ CMN_EVENT_DVM(CMN600, rxreq_retried, 0x04), ++ _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_all, 0x05, 0), ++ _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_dvmop, 0x05, 1), ++ _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_dvmsync, 0x05, 2), ++ ++ CMN_EVENT_HNF(CMN_ANY, cache_miss, 0x01), ++ CMN_EVENT_HNF(CMN_ANY, slc_sf_cache_access, 0x02), ++ CMN_EVENT_HNF(CMN_ANY, cache_fill, 0x03), ++ CMN_EVENT_HNF(CMN_ANY, pocq_retry, 0x04), ++ CMN_EVENT_HNF(CMN_ANY, pocq_reqs_recvd, 0x05), ++ CMN_EVENT_HNF(CMN_ANY, sf_hit, 0x06), ++ CMN_EVENT_HNF(CMN_ANY, sf_evictions, 0x07), ++ CMN_EVENT_HNF(CMN_ANY, dir_snoops_sent, 0x08), ++ CMN_EVENT_HNF(CMN_ANY, brd_snoops_sent, 0x09), ++ CMN_EVENT_HNF(CMN_ANY, slc_eviction, 0x0a), ++ CMN_EVENT_HNF(CMN_ANY, slc_fill_invalid_way, 0x0b), ++ CMN_EVENT_HNF(CMN_ANY, mc_retries, 0x0c), ++ CMN_EVENT_HNF(CMN_ANY, mc_reqs, 0x0d), ++ CMN_EVENT_HNF(CMN_ANY, qos_hh_retry, 0x0e), ++ _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_all, 0x0f, 0), ++ _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_read, 0x0f, 1), ++ _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_write, 0x0f, 2), ++ _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_atomic, 0x0f, 3), ++ 
_CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_stash, 0x0f, 4), ++ CMN_EVENT_HNF(CMN_ANY, pocq_addrhaz, 0x10), ++ CMN_EVENT_HNF(CMN_ANY, pocq_atomic_addrhaz, 0x11), ++ CMN_EVENT_HNF(CMN_ANY, ld_st_swp_adq_full, 0x12), ++ CMN_EVENT_HNF(CMN_ANY, cmp_adq_full, 0x13), ++ CMN_EVENT_HNF(CMN_ANY, txdat_stall, 0x14), ++ CMN_EVENT_HNF(CMN_ANY, txrsp_stall, 0x15), ++ CMN_EVENT_HNF(CMN_ANY, seq_full, 0x16), ++ CMN_EVENT_HNF(CMN_ANY, seq_hit, 0x17), ++ CMN_EVENT_HNF(CMN_ANY, snp_sent, 0x18), ++ CMN_EVENT_HNF(CMN_ANY, sfbi_dir_snp_sent, 0x19), ++ CMN_EVENT_HNF(CMN_ANY, sfbi_brd_snp_sent, 0x1a), ++ CMN_EVENT_HNF(CMN_ANY, snp_sent_untrk, 0x1b), ++ CMN_EVENT_HNF(CMN_ANY, intv_dirty, 0x1c), ++ CMN_EVENT_HNF(CMN_ANY, stash_snp_sent, 0x1d), ++ CMN_EVENT_HNF(CMN_ANY, stash_data_pull, 0x1e), ++ CMN_EVENT_HNF(CMN_ANY, snp_fwded, 0x1f), ++ ++ CMN_EVENT_HNI(rrt_rd_occ_cnt_ovfl, 0x20), ++ CMN_EVENT_HNI(rrt_wr_occ_cnt_ovfl, 0x21), ++ CMN_EVENT_HNI(rdt_rd_occ_cnt_ovfl, 0x22), ++ CMN_EVENT_HNI(rdt_wr_occ_cnt_ovfl, 0x23), ++ CMN_EVENT_HNI(wdb_occ_cnt_ovfl, 0x24), ++ CMN_EVENT_HNI(rrt_rd_alloc, 0x25), ++ CMN_EVENT_HNI(rrt_wr_alloc, 0x26), ++ CMN_EVENT_HNI(rdt_rd_alloc, 0x27), ++ CMN_EVENT_HNI(rdt_wr_alloc, 0x28), ++ CMN_EVENT_HNI(wdb_alloc, 0x29), ++ CMN_EVENT_HNI(txrsp_retryack, 0x2a), ++ CMN_EVENT_HNI(arvalid_no_arready, 0x2b), ++ CMN_EVENT_HNI(arready_no_arvalid, 0x2c), ++ CMN_EVENT_HNI(awvalid_no_awready, 0x2d), ++ CMN_EVENT_HNI(awready_no_awvalid, 0x2e), ++ CMN_EVENT_HNI(wvalid_no_wready, 0x2f), ++ CMN_EVENT_HNI(txdat_stall, 0x30), ++ CMN_EVENT_HNI(nonpcie_serialization, 0x31), ++ CMN_EVENT_HNI(pcie_serialization, 0x32), ++ ++ CMN_EVENT_XP(txflit_valid, 0x01), ++ CMN_EVENT_XP(txflit_stall, 0x02), ++ CMN_EVENT_XP(partial_dat_flit, 0x03), + /* We treat watchpoints as a special made-up class of XP events */ +- CMN_EVENT_ATTR(watchpoint_up, CMN_TYPE_WP, 0, 0), +- CMN_EVENT_ATTR(watchpoint_down, CMN_TYPE_WP, 2, 0), +- +- CMN_EVENT_SBSX(rd_req, 0x01), +- CMN_EVENT_SBSX(wr_req, 0x02), +- 
CMN_EVENT_SBSX(cmo_req, 0x03), +- CMN_EVENT_SBSX(txrsp_retryack, 0x04), +- CMN_EVENT_SBSX(txdat_flitv, 0x05), +- CMN_EVENT_SBSX(txrsp_flitv, 0x06), +- CMN_EVENT_SBSX(rd_req_trkr_occ_cnt_ovfl, 0x11), +- CMN_EVENT_SBSX(wr_req_trkr_occ_cnt_ovfl, 0x12), +- CMN_EVENT_SBSX(cmo_req_trkr_occ_cnt_ovfl, 0x13), +- CMN_EVENT_SBSX(wdb_occ_cnt_ovfl, 0x14), +- CMN_EVENT_SBSX(rd_axi_trkr_occ_cnt_ovfl, 0x15), +- CMN_EVENT_SBSX(cmo_axi_trkr_occ_cnt_ovfl, 0x16), +- CMN_EVENT_SBSX(arvalid_no_arready, 0x21), +- CMN_EVENT_SBSX(awvalid_no_awready, 0x22), +- CMN_EVENT_SBSX(wvalid_no_wready, 0x23), +- CMN_EVENT_SBSX(txdat_stall, 0x24), +- CMN_EVENT_SBSX(txrsp_stall, 0x25), +- +- CMN_EVENT_RNID(s0_rdata_beats, 0x01), +- CMN_EVENT_RNID(s1_rdata_beats, 0x02), +- CMN_EVENT_RNID(s2_rdata_beats, 0x03), +- CMN_EVENT_RNID(rxdat_flits, 0x04), +- CMN_EVENT_RNID(txdat_flits, 0x05), +- CMN_EVENT_RNID(txreq_flits_total, 0x06), +- CMN_EVENT_RNID(txreq_flits_retried, 0x07), +- CMN_EVENT_RNID(rrt_occ_ovfl, 0x08), +- CMN_EVENT_RNID(wrt_occ_ovfl, 0x09), +- CMN_EVENT_RNID(txreq_flits_replayed, 0x0a), +- CMN_EVENT_RNID(wrcancel_sent, 0x0b), +- CMN_EVENT_RNID(s0_wdata_beats, 0x0c), +- CMN_EVENT_RNID(s1_wdata_beats, 0x0d), +- CMN_EVENT_RNID(s2_wdata_beats, 0x0e), +- CMN_EVENT_RNID(rrt_alloc, 0x0f), +- CMN_EVENT_RNID(wrt_alloc, 0x10), +- CMN_EVENT_RNID(rdb_unord, 0x11), +- CMN_EVENT_RNID(rdb_replay, 0x12), +- CMN_EVENT_RNID(rdb_hybrid, 0x13), +- CMN_EVENT_RNID(rdb_ord, 0x14), ++ CMN_EVENT_ATTR(CMN_ANY, watchpoint_up, CMN_TYPE_WP, CMN_WP_UP, 0), ++ CMN_EVENT_ATTR(CMN_ANY, watchpoint_down, CMN_TYPE_WP, CMN_WP_DOWN, 0), ++ ++ CMN_EVENT_SBSX(CMN_ANY, rd_req, 0x01), ++ CMN_EVENT_SBSX(CMN_ANY, wr_req, 0x02), ++ CMN_EVENT_SBSX(CMN_ANY, cmo_req, 0x03), ++ CMN_EVENT_SBSX(CMN_ANY, txrsp_retryack, 0x04), ++ CMN_EVENT_SBSX(CMN_ANY, txdat_flitv, 0x05), ++ CMN_EVENT_SBSX(CMN_ANY, txrsp_flitv, 0x06), ++ CMN_EVENT_SBSX(CMN_ANY, rd_req_trkr_occ_cnt_ovfl, 0x11), ++ CMN_EVENT_SBSX(CMN_ANY, wr_req_trkr_occ_cnt_ovfl, 0x12), ++ 
CMN_EVENT_SBSX(CMN_ANY, cmo_req_trkr_occ_cnt_ovfl, 0x13), ++ CMN_EVENT_SBSX(CMN_ANY, wdb_occ_cnt_ovfl, 0x14), ++ CMN_EVENT_SBSX(CMN_ANY, rd_axi_trkr_occ_cnt_ovfl, 0x15), ++ CMN_EVENT_SBSX(CMN_ANY, cmo_axi_trkr_occ_cnt_ovfl, 0x16), ++ CMN_EVENT_SBSX(CMN_ANY, arvalid_no_arready, 0x21), ++ CMN_EVENT_SBSX(CMN_ANY, awvalid_no_awready, 0x22), ++ CMN_EVENT_SBSX(CMN_ANY, wvalid_no_wready, 0x23), ++ CMN_EVENT_SBSX(CMN_ANY, txdat_stall, 0x24), ++ CMN_EVENT_SBSX(CMN_ANY, txrsp_stall, 0x25), ++ ++ CMN_EVENT_RNID(CMN_ANY, s0_rdata_beats, 0x01), ++ CMN_EVENT_RNID(CMN_ANY, s1_rdata_beats, 0x02), ++ CMN_EVENT_RNID(CMN_ANY, s2_rdata_beats, 0x03), ++ CMN_EVENT_RNID(CMN_ANY, rxdat_flits, 0x04), ++ CMN_EVENT_RNID(CMN_ANY, txdat_flits, 0x05), ++ CMN_EVENT_RNID(CMN_ANY, txreq_flits_total, 0x06), ++ CMN_EVENT_RNID(CMN_ANY, txreq_flits_retried, 0x07), ++ CMN_EVENT_RNID(CMN_ANY, rrt_occ_ovfl, 0x08), ++ CMN_EVENT_RNID(CMN_ANY, wrt_occ_ovfl, 0x09), ++ CMN_EVENT_RNID(CMN_ANY, txreq_flits_replayed, 0x0a), ++ CMN_EVENT_RNID(CMN_ANY, wrcancel_sent, 0x0b), ++ CMN_EVENT_RNID(CMN_ANY, s0_wdata_beats, 0x0c), ++ CMN_EVENT_RNID(CMN_ANY, s1_wdata_beats, 0x0d), ++ CMN_EVENT_RNID(CMN_ANY, s2_wdata_beats, 0x0e), ++ CMN_EVENT_RNID(CMN_ANY, rrt_alloc, 0x0f), ++ CMN_EVENT_RNID(CMN_ANY, wrt_alloc, 0x10), ++ CMN_EVENT_RNID(CMN600, rdb_unord, 0x11), ++ CMN_EVENT_RNID(CMN600, rdb_replay, 0x12), ++ CMN_EVENT_RNID(CMN600, rdb_hybrid, 0x13), ++ CMN_EVENT_RNID(CMN600, rdb_ord, 0x14), + + NULL + }; +@@ -1386,15 +1400,14 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) + int i, j; + size_t sz; + +- cfg_region = cmn->base + rgn_offset; +- reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_2); +- cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg); +- dev_dbg(cmn->dev, "periph_id_2 revision: %d\n", cmn->rev); +- + arm_cmn_init_node_info(cmn, rgn_offset, &cfg); + if (cfg.type != CMN_TYPE_CFG) + return -ENODEV; + ++ cfg_region = cmn->base + rgn_offset; ++ reg = readl_relaxed(cfg_region + 
CMN_CFGM_PERIPH_ID_2); ++ cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg); ++ + reg = readq_relaxed(cfg_region + CMN_CHILD_INFO); + child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg); + child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg); +@@ -1507,13 +1520,14 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) + cmn->mesh_x = cmn->num_xps; + cmn->mesh_y = cmn->num_xps / cmn->mesh_x; + ++ dev_dbg(cmn->dev, "model %d, periph_id_2 revision %d\n", cmn->model, cmn->rev); + dev_dbg(cmn->dev, "mesh %dx%d, ID width %d\n", + cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn)); + + return 0; + } + +-static int arm_cmn_acpi_probe(struct platform_device *pdev, struct arm_cmn *cmn) ++static int arm_cmn600_acpi_probe(struct platform_device *pdev, struct arm_cmn *cmn) + { + struct resource *cfg, *root; + +@@ -1540,21 +1554,11 @@ static int arm_cmn_acpi_probe(struct platform_device *pdev, struct arm_cmn *cmn) + return root->start - cfg->start; + } + +-static int arm_cmn_of_probe(struct platform_device *pdev, struct arm_cmn *cmn) ++static int arm_cmn600_of_probe(struct device_node *np) + { +- struct device_node *np = pdev->dev.of_node; + u32 rootnode; +- int ret; + +- cmn->base = devm_platform_ioremap_resource(pdev, 0); +- if (IS_ERR(cmn->base)) +- return PTR_ERR(cmn->base); +- +- ret = of_property_read_u32(np, "arm,root-node", &rootnode); +- if (ret) +- return ret; +- +- return rootnode; ++ return of_property_read_u32(np, "arm,root-node", &rootnode) ?: rootnode; + } + + static int arm_cmn_probe(struct platform_device *pdev) +@@ -1569,12 +1573,19 @@ static int arm_cmn_probe(struct platform_device *pdev) + return -ENOMEM; + + cmn->dev = &pdev->dev; ++ cmn->model = (unsigned long)device_get_match_data(cmn->dev); + platform_set_drvdata(pdev, cmn); + +- if (has_acpi_companion(cmn->dev)) +- rootnode = arm_cmn_acpi_probe(pdev, cmn); +- else +- rootnode = arm_cmn_of_probe(pdev, cmn); ++ if (cmn->model == CMN600 && has_acpi_companion(cmn->dev)) { ++ rootnode = 
arm_cmn600_acpi_probe(pdev, cmn); ++ } else { ++ rootnode = 0; ++ cmn->base = devm_platform_ioremap_resource(pdev, 0); ++ if (IS_ERR(cmn->base)) ++ return PTR_ERR(cmn->base); ++ if (cmn->model == CMN600) ++ rootnode = arm_cmn600_of_probe(pdev->dev.of_node); ++ } + if (rootnode < 0) + return rootnode; + +@@ -1637,7 +1648,7 @@ static int arm_cmn_remove(struct platform_device *pdev) + + #ifdef CONFIG_OF + static const struct of_device_id arm_cmn_of_match[] = { +- { .compatible = "arm,cmn-600", }, ++ { .compatible = "arm,cmn-600", .data = (void *)CMN600 }, + {} + }; + MODULE_DEVICE_TABLE(of, arm_cmn_of_match); +@@ -1645,7 +1656,7 @@ MODULE_DEVICE_TABLE(of, arm_cmn_of_match); + + #ifdef CONFIG_ACPI + static const struct acpi_device_id arm_cmn_acpi_match[] = { +- { "ARMHC600", }, ++ { "ARMHC600", CMN600 }, + {} + }; + MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match); +-- +2.25.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0029-perf-arm-cmn-Support-new-IP-features.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0029-perf-arm-cmn-Support-new-IP-features.patch new file mode 100644 index 0000000000..fa0c5d9d16 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0029-perf-arm-cmn-Support-new-IP-features.patch @@ -0,0 +1,549 @@ +From a10c446ba1f7516c16dd6400c9a7f5e203779a5d Mon Sep 17 00:00:00 2001 +From: Robin Murphy <robin.murphy@arm.com> +Date: Fri, 3 Dec 2021 11:45:00 +0000 +Subject: [PATCH 13/14] perf/arm-cmn: Support new IP features + +The second generation of CMN IPs add new node types and significantly +expand the configuration space with options for extra device ports on +edge XPs, either plumbed into the regular DTM or with extra dedicated +DTMs to monitor them, plus larger (and smaller) mesh sizes. Add basic +support for pulling this new information out of the hardware, piping +it around as necessary, and handling (most of) the new choices. 
+ +Signed-off-by: Robin Murphy <robin.murphy@arm.com> +Link: https://lore.kernel.org/r/e58b495bcc7deec3882be4bac910ed0bf6979674.1638530442.git.robin.murphy@arm.com +Signed-off-by: Will Deacon <will@kernel.org> +Signed-off-by: Rupinderjit Singh <rupinderjit.singh@arm.com> +--- + drivers/perf/arm-cmn.c | 222 ++++++++++++++++++++++++++++++++--------- + 1 file changed, 173 insertions(+), 49 deletions(-) + +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c +index 92ff273fbe58..871c86687379 100644 +--- a/drivers/perf/arm-cmn.c ++++ b/drivers/perf/arm-cmn.c +@@ -24,7 +24,10 @@ + #define CMN_NI_LOGICAL_ID GENMASK_ULL(47, 32) + + #define CMN_NODEID_DEVID(reg) ((reg) & 3) ++#define CMN_NODEID_EXT_DEVID(reg) ((reg) & 1) + #define CMN_NODEID_PID(reg) (((reg) >> 2) & 1) ++#define CMN_NODEID_EXT_PID(reg) (((reg) >> 1) & 3) ++#define CMN_NODEID_1x1_PID(reg) (((reg) >> 2) & 7) + #define CMN_NODEID_X(reg, bits) ((reg) >> (3 + (bits))) + #define CMN_NODEID_Y(reg, bits) (((reg) >> 3) & ((1U << (bits)) - 1)) + +@@ -37,13 +40,26 @@ + + #define CMN_MAX_DIMENSION 8 + #define CMN_MAX_XPS (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION) +-#define CMN_MAX_DTMS CMN_MAX_XPS ++#define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4) + +-/* The CFG node has one other useful purpose */ ++/* The CFG node has various info besides the discovery tree */ + #define CMN_CFGM_PERIPH_ID_2 0x0010 + #define CMN_CFGM_PID2_REVISION GENMASK(7, 4) + +-/* PMU registers occupy the 3rd 4KB page of each node's 16KB space */ ++#define CMN_CFGM_INFO_GLOBAL 0x900 ++#define CMN_INFO_MULTIPLE_DTM_EN BIT_ULL(63) ++#define CMN_INFO_RSP_VC_NUM GENMASK_ULL(53, 52) ++#define CMN_INFO_DAT_VC_NUM GENMASK_ULL(51, 50) ++ ++/* XPs also have some local topology info which has uses too */ ++#define CMN_MXP__CONNECT_INFO_P0 0x0008 ++#define CMN_MXP__CONNECT_INFO_P1 0x0010 ++#define CMN_MXP__CONNECT_INFO_P2 0x0028 ++#define CMN_MXP__CONNECT_INFO_P3 0x0030 ++#define CMN_MXP__CONNECT_INFO_P4 0x0038 ++#define 
CMN_MXP__CONNECT_INFO_P5 0x0040 ++ ++/* PMU registers occupy the 3rd 4KB page of each node's region */ + #define CMN_PMU_OFFSET 0x2000 + + /* For most nodes, this is all there is */ +@@ -53,6 +69,7 @@ + /* DTMs live in the PMU space of XP registers */ + #define CMN_DTM_WPn(n) (0x1A0 + (n) * 0x18) + #define CMN_DTM_WPn_CONFIG(n) (CMN_DTM_WPn(n) + 0x00) ++#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2 GENMASK_ULL(18,17) + #define CMN_DTM_WPn_CONFIG_WP_COMBINE BIT(6) + #define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(5) + #define CMN_DTM_WPn_CONFIG_WP_GRP BIT(4) +@@ -77,7 +94,11 @@ + + #define CMN_DTM_PMEVCNTSR 0x240 + ++#define CMN_DTM_UNIT_INFO 0x0910 ++ + #define CMN_DTM_NUM_COUNTERS 4 ++/* Want more local counters? Why not replicate the whole DTM! Ugh... */ ++#define CMN_DTM_OFFSET(n) ((n) * 0x200) + + /* The DTC node is where the magic happens */ + #define CMN_DT_DTC_CTL 0x0a00 +@@ -131,10 +152,10 @@ + #define CMN_EVENT_NODEID(event) FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config) + + #define CMN_CONFIG_WP_COMBINE GENMASK_ULL(27, 24) +-#define CMN_CONFIG_WP_DEV_SEL BIT_ULL(48) +-#define CMN_CONFIG_WP_CHN_SEL GENMASK_ULL(50, 49) +-#define CMN_CONFIG_WP_GRP BIT_ULL(52) +-#define CMN_CONFIG_WP_EXCLUSIVE BIT_ULL(53) ++#define CMN_CONFIG_WP_DEV_SEL GENMASK_ULL(50, 48) ++#define CMN_CONFIG_WP_CHN_SEL GENMASK_ULL(55, 51) ++#define CMN_CONFIG_WP_GRP BIT_ULL(56) ++#define CMN_CONFIG_WP_EXCLUSIVE BIT_ULL(57) + #define CMN_CONFIG1_WP_VAL GENMASK_ULL(63, 0) + #define CMN_CONFIG2_WP_MASK GENMASK_ULL(63, 0) + +@@ -176,9 +197,12 @@ enum cmn_node_type { + CMN_TYPE_HNF, + CMN_TYPE_XP, + CMN_TYPE_SBSX, +- CMN_TYPE_RNI = 0xa, ++ CMN_TYPE_MPAM_S, ++ CMN_TYPE_MPAM_NS, ++ CMN_TYPE_RNI, + CMN_TYPE_RND = 0xd, + CMN_TYPE_RNSAM = 0xf, ++ CMN_TYPE_MTSX, + CMN_TYPE_CXRA = 0x100, + CMN_TYPE_CXHA = 0x101, + CMN_TYPE_CXLA = 0x102, +@@ -233,6 +257,7 @@ struct arm_cmn_dtc { + struct arm_cmn { + struct device *dev; + void __iomem *base; ++ unsigned int state; + + enum cmn_revision rev; + enum cmn_model 
model; +@@ -240,6 +265,13 @@ struct arm_cmn { + u8 mesh_y; + u16 num_xps; + u16 num_dns; ++ bool multi_dtm; ++ u8 ports_used; ++ struct { ++ unsigned int rsp_vc_num : 2; ++ unsigned int dat_vc_num : 2; ++ }; ++ + struct arm_cmn_node *xps; + struct arm_cmn_node *dns; + +@@ -250,7 +282,6 @@ struct arm_cmn { + int cpu; + struct hlist_node cpuhp_node; + +- unsigned int state; + struct pmu pmu; + }; + +@@ -275,13 +306,25 @@ static int arm_cmn_xyidbits(const struct arm_cmn *cmn) + static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id) + { + struct arm_cmn_nodeid nid; +- int bits = arm_cmn_xyidbits(cmn); + +- nid.x = CMN_NODEID_X(id, bits); +- nid.y = CMN_NODEID_Y(id, bits); +- nid.port = CMN_NODEID_PID(id); +- nid.dev = CMN_NODEID_DEVID(id); ++ if (cmn->num_xps == 1) { ++ nid.x = 0; ++ nid.y = 0; ++ nid.port = CMN_NODEID_1x1_PID(id); ++ nid.dev = CMN_NODEID_DEVID(id); ++ } else { ++ int bits = arm_cmn_xyidbits(cmn); + ++ nid.x = CMN_NODEID_X(id, bits); ++ nid.y = CMN_NODEID_Y(id, bits); ++ if (cmn->ports_used & 0xc) { ++ nid.port = CMN_NODEID_EXT_PID(id); ++ nid.dev = CMN_NODEID_EXT_DEVID(id); ++ } else { ++ nid.port = CMN_NODEID_PID(id); ++ nid.dev = CMN_NODEID_DEVID(id); ++ } ++ } + return nid; + } + +@@ -310,6 +353,7 @@ struct arm_cmn_hw_event { + unsigned int dtc_idx; + u8 dtcs_used; + u8 num_dns; ++ u8 dtm_offset; + }; + + #define for_each_hw_dn(hw, dn, i) \ +@@ -354,7 +398,8 @@ struct arm_cmn_format_attr { + .occupid = _occupid, \ + }})[0].attr.attr) + +-static bool arm_cmn_is_occup_event(enum cmn_node_type type, unsigned int id) ++static bool arm_cmn_is_occup_event(enum cmn_model model, ++ enum cmn_node_type type, unsigned int id) + { + return (type == CMN_TYPE_DVM && id == 0x05) || + (type == CMN_TYPE_HNF && id == 0x0f); +@@ -375,9 +420,9 @@ static ssize_t arm_cmn_event_show(struct device *dev, + "type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n", + eattr->type, eattr->eventid); + +- if 
(arm_cmn_is_occup_event(eattr->type, eattr->eventid)) +- return snprintf(buf, PAGE_SIZE, "type=0x%x,eventid=0x%x,occupid=0x%x\n", +- eattr->type, eattr->eventid, eattr->occupid); ++ if (arm_cmn_is_occup_event(eattr->model, eattr->type, eattr->eventid)) ++ return sysfs_emit(buf, "type=0x%x,eventid=0x%x,occupid=0x%x\n", ++ eattr->type, eattr->eventid, eattr->occupid); + + return snprintf(buf, PAGE_SIZE, "type=0x%x,eventid=0x%x\n", + eattr->type, eattr->eventid); +@@ -390,25 +435,36 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, + struct device *dev = kobj_to_dev(kobj); + struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev)); + struct arm_cmn_event_attr *eattr; +- enum cmn_node_type type; + + eattr = container_of(attr, typeof(*eattr), attr.attr); +- type = eattr->type; + + if (!(eattr->model & cmn->model)) + return 0; + +- /* Watchpoints aren't nodes */ +- if (type == CMN_TYPE_WP) +- type = CMN_TYPE_XP; ++ /* Watchpoints aren't nodes, so avoid confusion */ ++ if (eattr->type == CMN_TYPE_WP) ++ return attr->mode; ++ ++ /* Hide XP events for unused interfaces/channels */ ++ if (eattr->type == CMN_TYPE_XP) { ++ unsigned int intf = (eattr->eventid >> 2) & 7; ++ unsigned int chan = eattr->eventid >> 5; ++ ++ if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3))) ++ return 0; ++ ++ if ((chan == 5 && cmn->rsp_vc_num < 2) || ++ (chan == 6 && cmn->dat_vc_num < 2)) ++ return 0; ++ } + + /* Revision-specific differences */ + if (cmn->model == CMN600 && cmn->rev < CMN600_R1P2) { +- if (type == CMN_TYPE_HNF && eattr->eventid == 0x1b) ++ if (eattr->type == CMN_TYPE_HNF && eattr->eventid == 0x1b) + return 0; + } + +- if (!arm_cmn_node(cmn, type)) ++ if (!arm_cmn_node(cmn, eattr->type)) + return 0; + + return attr->mode; +@@ -669,7 +725,8 @@ static u32 arm_cmn_wp_config(struct perf_event *event) + config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) | + FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) | + FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_GRP, grp) | +- 
FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE, exc); ++ FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE, exc) | ++ FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL2, dev >> 1); + if (combine && !grp) + config |= CMN_DTM_WPn_CONFIG_WP_COMBINE; + +@@ -712,7 +769,7 @@ static u64 arm_cmn_read_dtm(struct arm_cmn *cmn, struct arm_cmn_hw_event *hw, + offset = snapshot ? CMN_DTM_PMEVCNTSR : CMN_DTM_PMEVCNT; + for_each_hw_dn(hw, dn, i) { + if (dtm != &cmn->dtms[dn->dtm]) { +- dtm = &cmn->dtms[dn->dtm]; ++ dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset; + reg = readq_relaxed(dtm->base + offset); + } + dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); +@@ -800,8 +857,10 @@ static void arm_cmn_event_start(struct perf_event *event, int flags) + u64 mask = CMN_EVENT_WP_MASK(event); + + for_each_hw_dn(hw, dn, i) { +- writeq_relaxed(val, dn->pmu_base + CMN_DTM_WPn_VAL(wp_idx)); +- writeq_relaxed(mask, dn->pmu_base + CMN_DTM_WPn_MASK(wp_idx)); ++ void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset); ++ ++ writeq_relaxed(val, base + CMN_DTM_WPn_VAL(wp_idx)); ++ writeq_relaxed(mask, base + CMN_DTM_WPn_MASK(wp_idx)); + } + } else for_each_hw_dn(hw, dn, i) { + int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); +@@ -826,8 +885,10 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags) + int wp_idx = arm_cmn_wp_idx(event); + + for_each_hw_dn(hw, dn, i) { +- writeq_relaxed(0, dn->pmu_base + CMN_DTM_WPn_MASK(wp_idx)); +- writeq_relaxed(~0ULL, dn->pmu_base + CMN_DTM_WPn_VAL(wp_idx)); ++ void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset); ++ ++ writeq_relaxed(0, base + CMN_DTM_WPn_MASK(wp_idx)); ++ writeq_relaxed(~0ULL, base + CMN_DTM_WPn_VAL(wp_idx)); + } + } else for_each_hw_dn(hw, dn, i) { + int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); +@@ -847,7 +908,8 @@ struct arm_cmn_val { + bool cycles; + }; + +-static void arm_cmn_val_add_event(struct arm_cmn_val *val, struct perf_event *event) ++static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val, ++ 
struct perf_event *event) + { + struct arm_cmn_hw_event *hw = to_cmn_hw(event); + struct arm_cmn_node *dn; +@@ -865,7 +927,7 @@ static void arm_cmn_val_add_event(struct arm_cmn_val *val, struct perf_event *ev + } + + val->dtc_count++; +- if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event))) ++ if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event))) + occupid = CMN_EVENT_OCCUPID(event) + 1; + else + occupid = 0; +@@ -884,7 +946,7 @@ static void arm_cmn_val_add_event(struct arm_cmn_val *val, struct perf_event *ev + } + } + +-static int arm_cmn_validate_group(struct perf_event *event) ++static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event) + { + struct arm_cmn_hw_event *hw = to_cmn_hw(event); + struct arm_cmn_node *dn; +@@ -904,9 +966,9 @@ static int arm_cmn_validate_group(struct perf_event *event) + if (!val) + return -ENOMEM; + +- arm_cmn_val_add_event(val, leader); ++ arm_cmn_val_add_event(cmn, val, leader); + for_each_sibling_event(sibling, leader) +- arm_cmn_val_add_event(val, sibling); ++ arm_cmn_val_add_event(cmn, val, sibling); + + type = CMN_EVENT_TYPE(event); + if (type == CMN_TYPE_DTC) { +@@ -917,7 +979,7 @@ static int arm_cmn_validate_group(struct perf_event *event) + if (val->dtc_count == CMN_DT_NUM_COUNTERS) + goto done; + +- if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event))) ++ if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event))) + occupid = CMN_EVENT_OCCUPID(event) + 1; + else + occupid = 0; +@@ -980,6 +1042,9 @@ static int arm_cmn_event_init(struct perf_event *event) + eventid = CMN_EVENT_EVENTID(event); + if (eventid != CMN_WP_UP && eventid != CMN_WP_DOWN) + return -EINVAL; ++ /* ...but the DTM may depend on which port we're watching */ ++ if (cmn->multi_dtm) ++ hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2; + } + + bynodeid = CMN_EVENT_BYNODEID(event); +@@ -1007,7 +1072,7 @@ static int arm_cmn_event_init(struct perf_event *event) + return -EINVAL; + } + +- return 
arm_cmn_validate_group(event); ++ return arm_cmn_validate_group(cmn, event); + } + + static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event, +@@ -1017,13 +1082,13 @@ static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event, + enum cmn_node_type type = CMN_EVENT_TYPE(event); + + while (i--) { +- struct arm_cmn_dtm *dtm = &cmn->dtms[hw->dn[i].dtm]; ++ struct arm_cmn_dtm *dtm = &cmn->dtms[hw->dn[i].dtm] + hw->dtm_offset; + unsigned int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); + + if (type == CMN_TYPE_WP) + dtm->wp_event[arm_cmn_wp_idx(event)] = -1; + +- if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event))) ++ if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event))) + hw->dn[i].occupid_count--; + + dtm->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx); +@@ -1069,7 +1134,7 @@ static int arm_cmn_event_add(struct perf_event *event, int flags) + + /* ...then the local counters to feed it. */ + for_each_hw_dn(hw, dn, i) { +- struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm]; ++ struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset; + unsigned int dtm_idx, shift; + u64 reg; + +@@ -1098,10 +1163,13 @@ static int arm_cmn_event_add(struct perf_event *event, int flags) + } else { + struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); + ++ if (cmn->multi_dtm) ++ nid.port %= 2; ++ + input_sel = CMN__PMEVCNT0_INPUT_SEL_DEV + dtm_idx + + (nid.port << 4) + (nid.dev << 2); + +- if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event))) { ++ if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event))) { + u8 occupid = CMN_EVENT_OCCUPID(event); + + if (dn->occupid_count == 0) { +@@ -1283,11 +1351,11 @@ static int arm_cmn_init_irqs(struct arm_cmn *cmn) + return 0; + } + +-static void arm_cmn_init_dtm(struct arm_cmn_dtm *dtm, struct arm_cmn_node *xp) ++static void arm_cmn_init_dtm(struct arm_cmn_dtm *dtm, struct arm_cmn_node *xp, int idx) + { + int i; + +- dtm->base = xp->pmu_base; ++ dtm->base = 
xp->pmu_base + CMN_DTM_OFFSET(idx); + dtm->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN; + for (i = 0; i < 4; i++) { + dtm->wp_event[i] = -1; +@@ -1345,6 +1413,8 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn) + + xp = arm_cmn_node_to_xp(cmn, dn); + dn->dtm = xp->dtm; ++ if (cmn->multi_dtm) ++ dn->dtm += arm_cmn_nid(cmn, dn->id).port / 2; + + if (dn->type == CMN_TYPE_DTC) { + int err; +@@ -1408,6 +1478,11 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) + reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_2); + cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg); + ++ reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL); ++ cmn->multi_dtm = reg & CMN_INFO_MULTIPLE_DTM_EN; ++ cmn->rsp_vc_num = FIELD_GET(CMN_INFO_RSP_VC_NUM, reg); ++ cmn->dat_vc_num = FIELD_GET(CMN_INFO_DAT_VC_NUM, reg); ++ + reg = readq_relaxed(cfg_region + CMN_CHILD_INFO); + child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg); + child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg); +@@ -1429,7 +1504,11 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) + if (!dn) + return -ENOMEM; + +- dtm = devm_kcalloc(cmn->dev, cmn->num_xps, sizeof(*dtm), GFP_KERNEL); ++ /* Initial safe upper bound on DTMs for any possible mesh layout */ ++ i = cmn->num_xps; ++ if (cmn->multi_dtm) ++ i += cmn->num_xps + 1; ++ dtm = devm_kcalloc(cmn->dev, i, sizeof(*dtm), GFP_KERNEL); + if (!dtm) + return -ENOMEM; + +@@ -1439,6 +1518,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) + for (i = 0; i < cmn->num_xps; i++) { + void __iomem *xp_region = cmn->base + xp_offset[i]; + struct arm_cmn_node *xp = dn++; ++ unsigned int xp_ports = 0; + + arm_cmn_init_node_info(cmn, xp_offset[i], xp); + /* +@@ -1450,9 +1530,39 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) + if (xp->id == (1 << 3)) + cmn->mesh_x = xp->logid; + +- xp->dtc = 0xf; ++ if (cmn->model == CMN600) ++ xp->dtc = 0xf; ++ else ++ xp->dtc = 1 << 
readl_relaxed(xp_region + CMN_DTM_UNIT_INFO); ++ + xp->dtm = dtm - cmn->dtms; +- arm_cmn_init_dtm(dtm++, xp); ++ arm_cmn_init_dtm(dtm++, xp, 0); ++ /* ++ * Keeping track of connected ports will let us filter out ++ * unnecessary XP events easily. We can also reliably infer the ++ * "extra device ports" configuration for the node ID format ++ * from this, since in that case we will see at least one XP ++ * with port 2 connected, for the HN-D. ++ */ ++ if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P0)) ++ xp_ports |= BIT(0); ++ if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P1)) ++ xp_ports |= BIT(1); ++ if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P2)) ++ xp_ports |= BIT(2); ++ if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P3)) ++ xp_ports |= BIT(3); ++ if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P4)) ++ xp_ports |= BIT(4); ++ if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P5)) ++ xp_ports |= BIT(5); ++ ++ if (cmn->multi_dtm && (xp_ports & 0xc)) ++ arm_cmn_init_dtm(dtm++, xp, 1); ++ if (cmn->multi_dtm && (xp_ports & 0x30)) ++ arm_cmn_init_dtm(dtm++, xp, 2); ++ ++ cmn->ports_used |= xp_ports; + + reg = readq_relaxed(xp_region + CMN_CHILD_INFO); + child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg); +@@ -1488,11 +1598,14 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) + case CMN_TYPE_SBSX: + case CMN_TYPE_RNI: + case CMN_TYPE_RND: ++ case CMN_TYPE_MTSX: + case CMN_TYPE_CXRA: + case CMN_TYPE_CXHA: + dn++; + break; + /* Nothing to see here */ ++ case CMN_TYPE_MPAM_S: ++ case CMN_TYPE_MPAM_NS: + case CMN_TYPE_RNSAM: + case CMN_TYPE_CXLA: + break; +@@ -1512,6 +1625,11 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) + if (dn) + cmn->dns = dn; + ++ sz = (void *)dtm - (void *)cmn->dtms; ++ dtm = devm_krealloc(cmn->dev, cmn->dtms, sz, GFP_KERNEL); ++ if (dtm) ++ cmn->dtms = dtm; ++ + /* + * If mesh_x wasn't set during discovery then we never saw + * an XP at (0,1), thus we must have an 
Nx1 configuration. +@@ -1520,9 +1638,15 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) + cmn->mesh_x = cmn->num_xps; + cmn->mesh_y = cmn->num_xps / cmn->mesh_x; + ++ /* 1x1 config plays havoc with XP event encodings */ ++ if (cmn->num_xps == 1) ++ dev_warn(cmn->dev, "1x1 config not fully supported, translate XP events manually\n"); ++ + dev_dbg(cmn->dev, "model %d, periph_id_2 revision %d\n", cmn->model, cmn->rev); +- dev_dbg(cmn->dev, "mesh %dx%d, ID width %d\n", +- cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn)); ++ reg = cmn->ports_used; ++ dev_dbg(cmn->dev, "mesh %dx%d, ID width %d, ports %6pbl%s\n", ++ cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn), ®, ++ cmn->multi_dtm ? ", multi-DTM" : ""); + + return 0; + } +-- +2.25.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0030-perf-arm-cmn-Add-CI-700-Support.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0030-perf-arm-cmn-Add-CI-700-Support.patch new file mode 100644 index 0000000000..a12911a0d7 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0030-perf-arm-cmn-Add-CI-700-Support.patch @@ -0,0 +1,150 @@ +From f5d0979c2385c3ef43d6f2af07c14ee897835e2c Mon Sep 17 00:00:00 2001 +From: Robin Murphy <robin.murphy@arm.com> +Date: Fri, 3 Dec 2021 11:45:02 +0000 +Subject: [PATCH 14/14] perf/arm-cmn: Add CI-700 Support + +Add the identifiers and events for the CI-700 coherent interconnect. 
+ +Signed-off-by: Robin Murphy <robin.murphy@arm.com> +Link: https://lore.kernel.org/r/28f566ab23a83733c6c9ef9414c010b760b4549c.1638530442.git.robin.murphy@arm.com +Signed-off-by: Will Deacon <will@kernel.org> + +Upstream-Status: Backport [https://lore.kernel.org/r/28f566ab23a83733c6c9ef9414c010b760b4549c.1638530442.git.robin.murphy@arm.com] +Signed-off-by: Rupinderjit Singh <rupinderjit.singh@arm.com> +--- + drivers/perf/arm-cmn.c | 57 +++++++++++++++++++++++++++++++++++++++--- + 1 file changed, 53 insertions(+), 4 deletions(-) + +diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c +index 871c86687379..e0f78b6c643c 100644 +--- a/drivers/perf/arm-cmn.c ++++ b/drivers/perf/arm-cmn.c +@@ -175,6 +175,7 @@ + enum cmn_model { + CMN_ANY = -1, + CMN600 = 1, ++ CI700 = 2, + }; + + /* CMN-600 r0px shouldn't exist in silicon, thankfully */ +@@ -186,6 +187,9 @@ enum cmn_revision { + CMN600_R2P0, + CMN600_R3P0, + CMN600_R3P1, ++ CI700_R0P0 = 0, ++ CI700_R1P0, ++ CI700_R2P0, + }; + + enum cmn_node_type { +@@ -401,8 +405,10 @@ struct arm_cmn_format_attr { + static bool arm_cmn_is_occup_event(enum cmn_model model, + enum cmn_node_type type, unsigned int id) + { +- return (type == CMN_TYPE_DVM && id == 0x05) || +- (type == CMN_TYPE_HNF && id == 0x0f); ++ if (type == CMN_TYPE_DVM) ++ return (model == CMN600 && id == 0x05) || ++ (model == CI700 && id == 0x0c); ++ return type == CMN_TYPE_HNF && id == 0x0f; + } + + static ssize_t arm_cmn_event_show(struct device *dev, +@@ -497,14 +503,19 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, + __CMN_EVENT_XP(n_##_name, (_event) | (2 << 2)), \ + __CMN_EVENT_XP(s_##_name, (_event) | (3 << 2)), \ + __CMN_EVENT_XP(p0_##_name, (_event) | (4 << 2)), \ +- __CMN_EVENT_XP(p1_##_name, (_event) | (5 << 2)) ++ __CMN_EVENT_XP(p1_##_name, (_event) | (5 << 2)), \ ++ __CMN_EVENT_XP(p2_##_name, (_event) | (6 << 2)), \ ++ __CMN_EVENT_XP(p3_##_name, (_event) | (7 << 2)) + + /* Good thing there are only 3 fundamental XP events... 
*/ + #define CMN_EVENT_XP(_name, _event) \ + _CMN_EVENT_XP(req_##_name, (_event) | (0 << 5)), \ + _CMN_EVENT_XP(rsp_##_name, (_event) | (1 << 5)), \ + _CMN_EVENT_XP(snp_##_name, (_event) | (2 << 5)), \ +- _CMN_EVENT_XP(dat_##_name, (_event) | (3 << 5)) ++ _CMN_EVENT_XP(dat_##_name, (_event) | (3 << 5)), \ ++ _CMN_EVENT_XP(pub_##_name, (_event) | (4 << 5)), \ ++ _CMN_EVENT_XP(rsp2_##_name, (_event) | (5 << 5)), \ ++ _CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5)) + + + static struct attribute *arm_cmn_event_attrs[] = { +@@ -522,6 +533,20 @@ static struct attribute *arm_cmn_event_attrs[] = { + _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_all, 0x05, 0), + _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_dvmop, 0x05, 1), + _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_dvmsync, 0x05, 2), ++ CMN_EVENT_DVM(CI700, dvmop_tlbi, 0x01), ++ CMN_EVENT_DVM(CI700, dvmop_bpi, 0x02), ++ CMN_EVENT_DVM(CI700, dvmop_pici, 0x03), ++ CMN_EVENT_DVM(CI700, dvmop_vici, 0x04), ++ CMN_EVENT_DVM(CI700, dvmsync, 0x05), ++ CMN_EVENT_DVM(CI700, vmid_filtered, 0x06), ++ CMN_EVENT_DVM(CI700, rndop_filtered, 0x07), ++ CMN_EVENT_DVM(CI700, retry, 0x08), ++ CMN_EVENT_DVM(CI700, txsnp_flitv, 0x09), ++ CMN_EVENT_DVM(CI700, txsnp_stall, 0x0a), ++ CMN_EVENT_DVM(CI700, trkfull, 0x0b), ++ _CMN_EVENT_DVM(CI700, trk_occupancy_all, 0x0c, 0), ++ _CMN_EVENT_DVM(CI700, trk_occupancy_dvmop, 0x0c, 1), ++ _CMN_EVENT_DVM(CI700, trk_occupancy_dvmsync, 0x0c, 2), + + CMN_EVENT_HNF(CMN_ANY, cache_miss, 0x01), + CMN_EVENT_HNF(CMN_ANY, slc_sf_cache_access, 0x02), +@@ -558,6 +583,9 @@ static struct attribute *arm_cmn_event_attrs[] = { + CMN_EVENT_HNF(CMN_ANY, stash_snp_sent, 0x1d), + CMN_EVENT_HNF(CMN_ANY, stash_data_pull, 0x1e), + CMN_EVENT_HNF(CMN_ANY, snp_fwded, 0x1f), ++ CMN_EVENT_HNF(CI700, atomic_fwd, 0x20), ++ CMN_EVENT_HNF(CI700, mpam_hardlim, 0x21), ++ CMN_EVENT_HNF(CI700, mpam_softlim, 0x22), + + CMN_EVENT_HNI(rrt_rd_occ_cnt_ovfl, 0x20), + CMN_EVENT_HNI(rrt_wr_occ_cnt_ovfl, 0x21), +@@ -598,6 +626,7 @@ static struct 
attribute *arm_cmn_event_attrs[] = { + CMN_EVENT_SBSX(CMN_ANY, wdb_occ_cnt_ovfl, 0x14), + CMN_EVENT_SBSX(CMN_ANY, rd_axi_trkr_occ_cnt_ovfl, 0x15), + CMN_EVENT_SBSX(CMN_ANY, cmo_axi_trkr_occ_cnt_ovfl, 0x16), ++ CMN_EVENT_SBSX(CI700, rdb_occ_cnt_ovfl, 0x17), + CMN_EVENT_SBSX(CMN_ANY, arvalid_no_arready, 0x21), + CMN_EVENT_SBSX(CMN_ANY, awvalid_no_awready, 0x22), + CMN_EVENT_SBSX(CMN_ANY, wvalid_no_wready, 0x23), +@@ -624,6 +653,25 @@ static struct attribute *arm_cmn_event_attrs[] = { + CMN_EVENT_RNID(CMN600, rdb_replay, 0x12), + CMN_EVENT_RNID(CMN600, rdb_hybrid, 0x13), + CMN_EVENT_RNID(CMN600, rdb_ord, 0x14), ++ CMN_EVENT_RNID(CI700, padb_occ_ovfl, 0x11), ++ CMN_EVENT_RNID(CI700, rpdb_occ_ovfl, 0x12), ++ CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice1, 0x13), ++ CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice2, 0x14), ++ CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice3, 0x15), ++ CMN_EVENT_RNID(CI700, wrt_throttled, 0x16), ++ ++ CMN_EVENT_MTSX(tc_lookup, 0x01), ++ CMN_EVENT_MTSX(tc_fill, 0x02), ++ CMN_EVENT_MTSX(tc_miss, 0x03), ++ CMN_EVENT_MTSX(tdb_forward, 0x04), ++ CMN_EVENT_MTSX(tcq_hazard, 0x05), ++ CMN_EVENT_MTSX(tcq_rd_alloc, 0x06), ++ CMN_EVENT_MTSX(tcq_wr_alloc, 0x07), ++ CMN_EVENT_MTSX(tcq_cmo_alloc, 0x08), ++ CMN_EVENT_MTSX(axi_rd_req, 0x09), ++ CMN_EVENT_MTSX(axi_wr_req, 0x0a), ++ CMN_EVENT_MTSX(tcq_occ_cnt_ovfl, 0x0b), ++ CMN_EVENT_MTSX(tdb_occ_cnt_ovfl, 0x0c), + + NULL + }; +@@ -1773,6 +1821,7 @@ static int arm_cmn_remove(struct platform_device *pdev) + #ifdef CONFIG_OF + static const struct of_device_id arm_cmn_of_match[] = { + { .compatible = "arm,cmn-600", .data = (void *)CMN600 }, ++ { .compatible = "arm,ci-700", .data = (void *)CI700 }, + {} + }; + MODULE_DEVICE_TABLE(of, arm_cmn_of_match); +-- +2.25.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0031-firmware-arm_ffa-Fix-uuid-argument-passed-to-ffa_par.patch 
b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0031-firmware-arm_ffa-Fix-uuid-argument-passed-to-ffa_par.patch new file mode 100644 index 0000000000..35b4f10b55 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0031-firmware-arm_ffa-Fix-uuid-argument-passed-to-ffa_par.patch @@ -0,0 +1,29 @@ +From 4d0a8147477699d40a02f121e7c72b21547273cf Mon Sep 17 00:00:00 2001 +From: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Date: Thu, 13 Jan 2022 20:14:25 +0000 +Subject: [PATCH 19/32] firmware: arm_ffa: Fix uuid argument passed to + ffa_partition_probe + +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Change-Id: Ib2749ec3e02da5bb6d835f7dbf2d608c41fad1f2 +Upstream-Status: Pending [Not submitted to upstream yet] +--- + drivers/firmware/arm_ffa/driver.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c +index 14f900047ac0..8fa1785afd42 100644 +--- a/drivers/firmware/arm_ffa/driver.c ++++ b/drivers/firmware/arm_ffa/driver.c +@@ -582,7 +582,7 @@ static int ffa_partition_info_get(const char *uuid_str, + return -ENODEV; + } + +- count = ffa_partition_probe(&uuid_null, &pbuf); ++ count = ffa_partition_probe(&uuid, &pbuf); + if (count <= 0) + return -ENOENT; + +-- +2.30.2 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0032-firmware-arm_ffa-Add-ffa_dev_get_drvdata.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0032-firmware-arm_ffa-Add-ffa_dev_get_drvdata.patch new file mode 100644 index 0000000000..52cf71be6d --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0032-firmware-arm_ffa-Add-ffa_dev_get_drvdata.patch @@ -0,0 +1,33 @@ +From 9acd4425667e240603ec196d8b64b2b25879805e Mon Sep 17 00:00:00 2001 +From: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Date: Thu, 13 Jan 2022 22:22:28 +0000 +Subject: 
[PATCH 20/32] firmware: arm_ffa: Add ffa_dev_get_drvdata + +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Change-Id: Icd09d686cab9922563b1deda5276307ea5d94923 +Upstream-Status: Pending [Not submitted to upstream yet] +--- + include/linux/arm_ffa.h | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h +index 85651e41ded8..e5c76c1ef9ed 100644 +--- a/include/linux/arm_ffa.h ++++ b/include/linux/arm_ffa.h +@@ -38,7 +38,12 @@ struct ffa_driver { + + static inline void ffa_dev_set_drvdata(struct ffa_device *fdev, void *data) + { +- fdev->dev.driver_data = data; ++ dev_set_drvdata(&fdev->dev, data); ++} ++ ++static inline void *ffa_dev_get_drvdata(struct ffa_device *fdev) ++{ ++ return dev_get_drvdata(&fdev->dev); + } + + #if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT) +-- +2.30.2 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0033-firmware-arm_ffa-extern-ffa_bus_type.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0033-firmware-arm_ffa-extern-ffa_bus_type.patch new file mode 100644 index 0000000000..bbbc1783e7 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0033-firmware-arm_ffa-extern-ffa_bus_type.patch @@ -0,0 +1,30 @@ +From 7a9298916fe892ddac5fe4e0a13a566b1636f542 Mon Sep 17 00:00:00 2001 +From: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Date: Thu, 13 Jan 2022 22:23:52 +0000 +Subject: [PATCH 21/32] firmware: arm_ffa: extern ffa_bus_type + +extern ffa_bus_type so that SP driver can use it in bus_find_device call. 
+ +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Change-Id: Ib7a6a563aa35627a545f82c796816a5f72c80d70 +Upstream-Status: Pending [Not submitted to upstream yet] +--- + include/linux/arm_ffa.h | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h +index e5c76c1ef9ed..4eb7e03ca560 100644 +--- a/include/linux/arm_ffa.h ++++ b/include/linux/arm_ffa.h +@@ -88,6 +88,8 @@ const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev) + #define ffa_unregister(driver) \ + ffa_driver_unregister(driver) + ++extern struct bus_type ffa_bus_type; ++ + /** + * module_ffa_driver() - Helper macro for registering a psa_ffa driver + * @__ffa_driver: ffa_driver structure +-- +2.30.2 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0034-firmware-arm_ffa-Fix-FFA_MEM_SHARE-and-FFA_MEM_FRAG_.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0034-firmware-arm_ffa-Fix-FFA_MEM_SHARE-and-FFA_MEM_FRAG_.patch new file mode 100644 index 0000000000..977b550c93 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0034-firmware-arm_ffa-Fix-FFA_MEM_SHARE-and-FFA_MEM_FRAG_.patch @@ -0,0 +1,54 @@ +From e0b9971db819fb9ed9b08a5d3f6f2a4565e92a1a Mon Sep 17 00:00:00 2001 +From: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Date: Fri, 14 Jan 2022 12:23:04 +0000 +Subject: [PATCH 22/32] firmware: arm_ffa: Fix FFA_MEM_SHARE and + FFA_MEM_FRAG_TX + +FFA memory share on success might return FFA_MEM_FRAG_RX. In that case +set handle from w1/w2 from FFA return value. + +FFA_MEM_FRAG_TX call will return FFA_SUCCESS for the last fragment, so +check for this return code. 
+ +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Change-Id: I7ef44742d53a9e75d8587d1213be98a1352f16d4 +Upstream-Status: Pending [Not submitted to upstream yet] +--- + drivers/firmware/arm_ffa/driver.c | 12 ++++++++---- + 1 file changed, 8 insertions(+), 4 deletions(-) + +diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c +index 8fa1785afd42..a3b1df6d7f3c 100644 +--- a/drivers/firmware/arm_ffa/driver.c ++++ b/drivers/firmware/arm_ffa/driver.c +@@ -398,11 +398,15 @@ static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz, + if (ret.a0 == FFA_ERROR) + return ffa_to_linux_errno((int)ret.a2); + +- if (ret.a0 != FFA_SUCCESS) ++ if (ret.a0 != FFA_SUCCESS && ret.a0 != FFA_MEM_FRAG_RX) + return -EOPNOTSUPP; + +- if (handle) +- *handle = PACK_HANDLE(ret.a2, ret.a3); ++ if (handle) { ++ if (ret.a0 == FFA_MEM_FRAG_RX) ++ *handle = PACK_HANDLE(ret.a1, ret.a2); ++ else ++ *handle = PACK_HANDLE(ret.a2, ret.a3); ++ } + + return frag_len; + } +@@ -426,7 +430,7 @@ static int ffa_mem_next_frag(u64 handle, u32 frag_len) + if (ret.a0 == FFA_ERROR) + return ffa_to_linux_errno((int)ret.a2); + +- if (ret.a0 != FFA_MEM_FRAG_RX) ++ if (ret.a0 != FFA_SUCCESS && ret.a0 != FFA_MEM_FRAG_RX) + return -EOPNOTSUPP; + + return ret.a3; +-- +2.30.2 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0035-ANDROID-trusty-Backport-of-trusty-driver.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0035-ANDROID-trusty-Backport-of-trusty-driver.patch new file mode 100644 index 0000000000..290de51118 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0035-ANDROID-trusty-Backport-of-trusty-driver.patch @@ -0,0 +1,8099 @@ +From 3e1e61f54538e8ce4bcbb5a9a213624eafcae514 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= <arve@android.com> +Date: Mon, 18 Nov 2013 20:46:48 -0800 +Subject: [PATCH 18/32] ANDROID: trusty: Backport of 
trusty driver + +This adds Trusty driver from android-trusty-5.10 + +Original commits: +b60d55f33484 ANDROID: trusty-ipc: Allow registering multiple handles +629a4d3318cc ANDROID: trusty: Support setting trusty_shared_mem_id_t +94a36a1374e7 ANDROID: trusty-log: Don't copy Trusty logs to linux kernel log +efc21cced8af ANDROID: trusty-log: rework buffer allocation +8cb1a07ca814 ANDROID: trusty-ipc: Fix lock protection of shared_handles +52cdd137fae0 ANDROID: trusty-log: support poll() +24c3649dceb9 ANDROID: trusty-irq: enqueue work in trusty_irq_cpu_up +05a05bdd921e ANDROID: trusty: Add config TRUSTY_CRASH_IS_PANIC +b5fbdba2ec72 ANDROID: trusty-ipc: Fix crash when running out of txbuffers +46da5b95605e ANDROID: trusty: Allow TRUSTY_LEND of buffers +2ebfb16645af ANDROID: trusty-virtio: remove unnecessary include of dma-mapping.h +bf9d994a65a2 ANDROID: trusty-log: Complement logging sink with unthrottled virtual file +d5cb51d0365d ANDROID: trusty-log: Refactor logging state to support concurrent sinks +b421a5ad3eb3 ANDROID: trusty-log: Sanitize u32 overflow of the log ring buffer write index +58e9681c57af ANDROID: trusty-log: On trusty panic, unthrottle sink to the kernel log +ba12be0f203a ANDROID: trusty-log: Update trusty log buffer size to hold a complete Trusty crash logs +a8a3f83e52b6 ANDROID: trusty_qemu_defconfig: Enable dma-buf and ion system heaps +988b52b392a1 ANDROID: trusty: Support setting FF-A Tag +f544e96489aa ANDROID: Add trusty_qemu_defconfig +8a9b09317f29 ANDROID: trusty-ipc: Switch from memfd to dma_buf +5460418ec9a4 ANDROID: trusty-irq: document new way of specifying IPIs +da3c30b943c2 ANDROID: trusty-irq: specify IPIs in new way +5b5bb7f74856 ANDROID: trusty: Add trusty-test driver +e80d87f422fd ANDROID: trusty: Add trusty-ipc driver +03c248cbf693 ANDROID: trusty: Add trusty-virtio driver +1047661edb97 ANDROID: trusty: Add trusty-log driver +18fd5c59b423 ANDROID: trusty: Add trusty-irq driver +479c39a683f8 ANDROID: trusty: Add trusty-core driver + 
+Upstream-Status: Backport +Change-Id: I91f71b891a1091383a298e7fb2f9030382a19ca5 +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +--- + .../devicetree/bindings/trusty/trusty-irq.txt | 67 + + .../devicetree/bindings/trusty/trusty-smc.txt | 6 + + arch/arm/configs/trusty_qemu_defconfig | 291 +++ + .../configs/trusty_qemu_defconfig.fragment | 26 + + drivers/Kconfig | 2 + + drivers/Makefile | 1 + + drivers/trusty/Kconfig | 116 + + drivers/trusty/Makefile | 14 + + drivers/trusty/trusty-ipc.c | 2256 +++++++++++++++++ + drivers/trusty/trusty-irq.c | 645 +++++ + drivers/trusty/trusty-log.c | 830 ++++++ + drivers/trusty/trusty-log.h | 28 + + drivers/trusty/trusty-mem.c | 139 + + drivers/trusty/trusty-smc-arm.S | 41 + + drivers/trusty/trusty-smc-arm64.S | 35 + + drivers/trusty/trusty-smc.h | 26 + + drivers/trusty/trusty-test.c | 440 ++++ + drivers/trusty/trusty-test.h | 13 + + drivers/trusty/trusty-virtio.c | 840 ++++++ + drivers/trusty/trusty.c | 981 +++++++ + include/linux/trusty/arm_ffa.h | 590 +++++ + include/linux/trusty/sm_err.h | 28 + + include/linux/trusty/smcall.h | 124 + + include/linux/trusty/trusty.h | 131 + + include/linux/trusty/trusty_ipc.h | 89 + + include/uapi/linux/trusty/ipc.h | 65 + + include/uapi/linux/virtio_ids.h | 1 + + 27 files changed, 7825 insertions(+) + create mode 100644 Documentation/devicetree/bindings/trusty/trusty-irq.txt + create mode 100644 Documentation/devicetree/bindings/trusty/trusty-smc.txt + create mode 100644 arch/arm/configs/trusty_qemu_defconfig + create mode 100644 arch/arm64/configs/trusty_qemu_defconfig.fragment + create mode 100644 drivers/trusty/Kconfig + create mode 100644 drivers/trusty/Makefile + create mode 100644 drivers/trusty/trusty-ipc.c + create mode 100644 drivers/trusty/trusty-irq.c + create mode 100644 drivers/trusty/trusty-log.c + create mode 100644 drivers/trusty/trusty-log.h + create mode 100644 drivers/trusty/trusty-mem.c + create mode 100644 drivers/trusty/trusty-smc-arm.S + create mode 
100644 drivers/trusty/trusty-smc-arm64.S + create mode 100644 drivers/trusty/trusty-smc.h + create mode 100644 drivers/trusty/trusty-test.c + create mode 100644 drivers/trusty/trusty-test.h + create mode 100644 drivers/trusty/trusty-virtio.c + create mode 100644 drivers/trusty/trusty.c + create mode 100644 include/linux/trusty/arm_ffa.h + create mode 100644 include/linux/trusty/sm_err.h + create mode 100644 include/linux/trusty/smcall.h + create mode 100644 include/linux/trusty/trusty.h + create mode 100644 include/linux/trusty/trusty_ipc.h + create mode 100644 include/uapi/linux/trusty/ipc.h + +diff --git a/Documentation/devicetree/bindings/trusty/trusty-irq.txt b/Documentation/devicetree/bindings/trusty/trusty-irq.txt +new file mode 100644 +index 000000000000..cbb545ad452b +--- /dev/null ++++ b/Documentation/devicetree/bindings/trusty/trusty-irq.txt +@@ -0,0 +1,67 @@ ++Trusty irq interface ++ ++Trusty requires non-secure irqs to be forwarded to the secure OS. ++ ++Required properties: ++- compatible: "android,trusty-irq-v1" ++ ++Optional properties: ++ ++- interrupt-templates: is an optional property that works together ++ with "interrupt-ranges" to specify secure side to kernel IRQs mapping. ++ ++ It is a list of entries, each one of which defines a group of interrupts ++ having common properties, and has the following format: ++ < phandle irq_id_pos [templ_data]> ++ phandle - phandle of interrupt controller this template is for ++ irq_id_pos - the position of irq id in interrupt specifier array ++ for interrupt controller referenced by phandle. ++ templ_data - is an array of u32 values (could be empty) in the same ++ format as interrupt specifier for interrupt controller ++ referenced by phandle but with omitted irq id field. ++ ++- interrupt-ranges: list of entries that specifies secure side to kernel ++ IRQs mapping. 
++ ++ Each entry in the "interrupt-ranges" list has the following format: ++ <beg end templ_idx> ++ beg - first entry in this range ++ end - last entry in this range ++ templ_idx - index of entry in "interrupt-templates" property ++ that must be used as a template for all interrupts ++ in this range ++ ++- ipi-range: optional mapping of a linear range of trusty IRQs to a linear range ++ of IPIs (inter-processor interrupts). This has the following format: ++ <beg end ipi_base> ++ beg - first trusty IRQ number that is an IPI ++ end - last trusty IRQ number that is an IPI ++ ipi_base - IPI number of 'beg' ++ ++Example: ++{ ++ gic: interrupt-controller@50041000 { ++ compatible = "arm,gic-400"; ++ #interrupt-cells = <3>; ++ interrupt-controller; ++ ... ++ }; ++ ... ++ trusty { ++ compatible = "android,trusty-smc-v1"; ++ ranges; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ irq { ++ compatible = "android,trusty-irq-v1"; ++ interrupt-templates = <&gic 1 GIC_PPI 0>, ++ <&gic 1 GIC_SPI 0>; ++ interrupt-ranges = <16 31 0>, ++ <32 223 1>; ++ ipi-range = <8 15 8>; ++ }; ++ } ++} ++ ++Must be a child of the node that provides the trusty std/fast call interface. +diff --git a/Documentation/devicetree/bindings/trusty/trusty-smc.txt b/Documentation/devicetree/bindings/trusty/trusty-smc.txt +new file mode 100644 +index 000000000000..1b39ad317c67 +--- /dev/null ++++ b/Documentation/devicetree/bindings/trusty/trusty-smc.txt +@@ -0,0 +1,6 @@ ++Trusty smc interface ++ ++Trusty is running in secure mode on the same (arm) cpu(s) as the current os. 
++ ++Required properties: ++- compatible: "android,trusty-smc-v1" +diff --git a/arch/arm/configs/trusty_qemu_defconfig b/arch/arm/configs/trusty_qemu_defconfig +new file mode 100644 +index 000000000000..46ad9504c23d +--- /dev/null ++++ b/arch/arm/configs/trusty_qemu_defconfig +@@ -0,0 +1,291 @@ ++# CONFIG_LOCALVERSION_AUTO is not set ++# CONFIG_SWAP is not set ++CONFIG_POSIX_MQUEUE=y ++CONFIG_AUDIT=y ++CONFIG_NO_HZ=y ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_PREEMPT=y ++CONFIG_BSD_PROCESS_ACCT=y ++CONFIG_BSD_PROCESS_ACCT_V3=y ++CONFIG_TASKSTATS=y ++CONFIG_TASK_DELAY_ACCT=y ++CONFIG_TASK_XACCT=y ++CONFIG_TASK_IO_ACCOUNTING=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_BUF_SHIFT=14 ++CONFIG_RT_GROUP_SCHED=y ++CONFIG_CGROUP_FREEZER=y ++CONFIG_CGROUP_CPUACCT=y ++CONFIG_CGROUP_DEBUG=y ++CONFIG_SCHED_AUTOGROUP=y ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_KALLSYMS_ALL=y ++CONFIG_EMBEDDED=y ++# CONFIG_COMPAT_BRK is not set ++CONFIG_PROFILING=y ++CONFIG_ARCH_VIRT=y ++CONFIG_PCI=y ++CONFIG_PCI_HOST_GENERIC=y ++CONFIG_SMP=y ++CONFIG_HIGHMEM=y ++CONFIG_SECCOMP=y ++CONFIG_CMDLINE="console=ttyAMA0" ++CONFIG_PM_AUTOSLEEP=y ++CONFIG_PM_WAKELOCKS=y ++CONFIG_PM_WAKELOCKS_LIMIT=0 ++# CONFIG_PM_WAKELOCKS_GC is not set ++CONFIG_PM_DEBUG=y ++# CONFIG_BLK_DEV_BSG is not set ++# CONFIG_IOSCHED_DEADLINE is not set ++# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set ++CONFIG_KSM=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_XFRM_USER=y ++CONFIG_NET_KEY=y ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++CONFIG_IP_ADVANCED_ROUTER=y ++CONFIG_IP_MULTIPLE_TABLES=y ++CONFIG_IP_PNP=y ++CONFIG_IP_PNP_DHCP=y ++CONFIG_IP_PNP_BOOTP=y ++CONFIG_INET_ESP=y ++CONFIG_INET_DIAG_DESTROY=y ++CONFIG_IPV6_ROUTER_PREF=y ++CONFIG_IPV6_ROUTE_INFO=y ++CONFIG_IPV6_OPTIMISTIC_DAD=y ++CONFIG_INET6_AH=y ++CONFIG_INET6_ESP=y ++CONFIG_INET6_IPCOMP=y ++CONFIG_IPV6_MIP6=y ++CONFIG_IPV6_MULTIPLE_TABLES=y ++CONFIG_NETFILTER=y ++CONFIG_NF_CONNTRACK=y ++CONFIG_NF_CONNTRACK_SECMARK=y ++CONFIG_NF_CONNTRACK_EVENTS=y 
++CONFIG_NF_CONNTRACK_AMANDA=y ++CONFIG_NF_CONNTRACK_FTP=y ++CONFIG_NF_CONNTRACK_H323=y ++CONFIG_NF_CONNTRACK_IRC=y ++CONFIG_NF_CONNTRACK_NETBIOS_NS=y ++CONFIG_NF_CONNTRACK_PPTP=y ++CONFIG_NF_CONNTRACK_SANE=y ++CONFIG_NF_CONNTRACK_TFTP=y ++CONFIG_NF_CT_NETLINK=y ++CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y ++CONFIG_NETFILTER_XT_TARGET_CONNMARK=y ++CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y ++CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y ++CONFIG_NETFILTER_XT_TARGET_MARK=y ++CONFIG_NETFILTER_XT_TARGET_NFLOG=y ++CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y ++CONFIG_NETFILTER_XT_TARGET_TPROXY=y ++CONFIG_NETFILTER_XT_TARGET_TRACE=y ++CONFIG_NETFILTER_XT_TARGET_SECMARK=y ++CONFIG_NETFILTER_XT_TARGET_TCPMSS=y ++CONFIG_NETFILTER_XT_MATCH_COMMENT=y ++CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y ++CONFIG_NETFILTER_XT_MATCH_CONNMARK=y ++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y ++CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y ++CONFIG_NETFILTER_XT_MATCH_HELPER=y ++CONFIG_NETFILTER_XT_MATCH_IPRANGE=y ++CONFIG_NETFILTER_XT_MATCH_LENGTH=y ++CONFIG_NETFILTER_XT_MATCH_LIMIT=y ++CONFIG_NETFILTER_XT_MATCH_MAC=y ++CONFIG_NETFILTER_XT_MATCH_MARK=y ++CONFIG_NETFILTER_XT_MATCH_POLICY=y ++CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y ++CONFIG_NETFILTER_XT_MATCH_QUOTA=y ++CONFIG_NETFILTER_XT_MATCH_QUOTA2=y ++CONFIG_NETFILTER_XT_MATCH_SOCKET=y ++CONFIG_NETFILTER_XT_MATCH_STATE=y ++CONFIG_NETFILTER_XT_MATCH_STATISTIC=y ++CONFIG_NETFILTER_XT_MATCH_STRING=y ++CONFIG_NETFILTER_XT_MATCH_TIME=y ++CONFIG_NETFILTER_XT_MATCH_U32=y ++CONFIG_IP_NF_IPTABLES=y ++CONFIG_IP_NF_MATCH_AH=y ++CONFIG_IP_NF_MATCH_ECN=y ++CONFIG_IP_NF_MATCH_RPFILTER=y ++CONFIG_IP_NF_MATCH_TTL=y ++CONFIG_IP_NF_FILTER=y ++CONFIG_IP_NF_TARGET_REJECT=y ++CONFIG_IP_NF_MANGLE=y ++CONFIG_IP_NF_TARGET_ECN=y ++CONFIG_IP_NF_TARGET_TTL=y ++CONFIG_IP_NF_RAW=y ++CONFIG_IP_NF_SECURITY=y ++CONFIG_IP_NF_ARPTABLES=y ++CONFIG_IP_NF_ARPFILTER=y ++CONFIG_IP_NF_ARP_MANGLE=y ++CONFIG_IP6_NF_IPTABLES=y ++CONFIG_IP6_NF_MATCH_AH=y ++CONFIG_IP6_NF_MATCH_EUI64=y ++CONFIG_IP6_NF_MATCH_FRAG=y 
++CONFIG_IP6_NF_MATCH_OPTS=y ++CONFIG_IP6_NF_MATCH_HL=y ++CONFIG_IP6_NF_MATCH_IPV6HEADER=y ++CONFIG_IP6_NF_MATCH_MH=y ++CONFIG_IP6_NF_MATCH_RT=y ++CONFIG_IP6_NF_TARGET_HL=y ++CONFIG_IP6_NF_FILTER=y ++CONFIG_IP6_NF_TARGET_REJECT=y ++CONFIG_IP6_NF_MANGLE=y ++CONFIG_IP6_NF_RAW=y ++CONFIG_BRIDGE=y ++CONFIG_NET_SCHED=y ++CONFIG_NET_SCH_HTB=y ++CONFIG_NET_CLS_U32=y ++CONFIG_NET_EMATCH=y ++CONFIG_NET_EMATCH_U32=y ++CONFIG_NET_CLS_ACT=y ++# CONFIG_WIRELESS is not set ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++CONFIG_BLK_DEV_LOOP=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_SIZE=8192 ++CONFIG_VIRTIO_BLK=y ++CONFIG_SCSI=y ++# CONFIG_SCSI_PROC_FS is not set ++CONFIG_BLK_DEV_SD=y ++# CONFIG_SCSI_LOWLEVEL is not set ++CONFIG_MD=y ++CONFIG_BLK_DEV_DM=y ++CONFIG_DM_CRYPT=y ++CONFIG_DM_UEVENT=y ++CONFIG_DM_VERITY=y ++CONFIG_DM_VERITY_FEC=y ++CONFIG_NETDEVICES=y ++CONFIG_TUN=y ++CONFIG_VIRTIO_NET=y ++CONFIG_E1000=y ++CONFIG_E1000E=y ++CONFIG_PPP=y ++CONFIG_PPP_BSDCOMP=y ++CONFIG_PPP_DEFLATE=y ++CONFIG_PPP_MPPE=y ++# CONFIG_WLAN is not set ++CONFIG_INPUT_EVDEV=y ++CONFIG_KEYBOARD_GOLDFISH_EVENTS=y ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_JOYSTICK=y ++CONFIG_INPUT_TABLET=y ++CONFIG_INPUT_MISC=y ++CONFIG_INPUT_UINPUT=y ++# CONFIG_SERIO_SERPORT is not set ++# CONFIG_VT is not set ++# CONFIG_LEGACY_PTYS is not set ++# CONFIG_DEVMEM is not set ++CONFIG_SERIAL_AMBA_PL011=y ++CONFIG_SERIAL_AMBA_PL011_CONSOLE=y ++CONFIG_VIRTIO_CONSOLE=y ++# CONFIG_HW_RANDOM is not set ++CONFIG_BATTERY_GOLDFISH=y ++# CONFIG_HWMON is not set ++CONFIG_TRUSTY=y ++CONFIG_MEDIA_SUPPORT=y ++CONFIG_FB=y ++CONFIG_FB_GOLDFISH=y ++CONFIG_FB_SIMPLE=y ++CONFIG_BACKLIGHT_LCD_SUPPORT=y ++CONFIG_LOGO=y ++# CONFIG_LOGO_LINUX_MONO is not set ++# CONFIG_LOGO_LINUX_VGA16 is not set ++CONFIG_SOUND=y ++CONFIG_SND=y ++CONFIG_HIDRAW=y ++CONFIG_UHID=y ++CONFIG_HID_A4TECH=y ++CONFIG_HID_ACRUX=y ++CONFIG_HID_ACRUX_FF=y ++CONFIG_HID_APPLE=y ++CONFIG_HID_BELKIN=y ++CONFIG_HID_CHERRY=y ++CONFIG_HID_CHICONY=y 
++CONFIG_HID_PRODIKEYS=y ++CONFIG_HID_CYPRESS=y ++CONFIG_HID_DRAGONRISE=y ++CONFIG_DRAGONRISE_FF=y ++CONFIG_HID_EMS_FF=y ++CONFIG_HID_ELECOM=y ++CONFIG_HID_EZKEY=y ++CONFIG_HID_KEYTOUCH=y ++CONFIG_HID_KYE=y ++CONFIG_HID_WALTOP=y ++CONFIG_HID_GYRATION=y ++CONFIG_HID_TWINHAN=y ++CONFIG_HID_KENSINGTON=y ++CONFIG_HID_LCPOWER=y ++CONFIG_HID_LOGITECH=y ++CONFIG_HID_LOGITECH_DJ=y ++CONFIG_LOGITECH_FF=y ++CONFIG_LOGIRUMBLEPAD2_FF=y ++CONFIG_LOGIG940_FF=y ++CONFIG_HID_MAGICMOUSE=y ++CONFIG_HID_MICROSOFT=y ++CONFIG_HID_MONTEREY=y ++CONFIG_HID_MULTITOUCH=y ++CONFIG_HID_ORTEK=y ++CONFIG_HID_PANTHERLORD=y ++CONFIG_PANTHERLORD_FF=y ++CONFIG_HID_PETALYNX=y ++CONFIG_HID_PICOLCD=y ++CONFIG_HID_PRIMAX=y ++CONFIG_HID_SAITEK=y ++CONFIG_HID_SAMSUNG=y ++CONFIG_HID_SPEEDLINK=y ++CONFIG_HID_SUNPLUS=y ++CONFIG_HID_GREENASIA=y ++CONFIG_GREENASIA_FF=y ++CONFIG_HID_SMARTJOYPLUS=y ++CONFIG_SMARTJOYPLUS_FF=y ++CONFIG_HID_TIVO=y ++CONFIG_HID_TOPSEED=y ++CONFIG_HID_THRUSTMASTER=y ++CONFIG_HID_ZEROPLUS=y ++CONFIG_HID_ZYDACRON=y ++# CONFIG_USB_SUPPORT is not set ++CONFIG_RTC_CLASS=y ++CONFIG_VIRTIO_PCI=y ++CONFIG_VIRTIO_MMIO=y ++CONFIG_STAGING=y ++CONFIG_ASHMEM=y ++CONFIG_ION=y ++CONFIG_GOLDFISH_AUDIO=y ++CONFIG_GOLDFISH=y ++CONFIG_GOLDFISH_PIPE=y ++# CONFIG_IOMMU_SUPPORT is not set ++CONFIG_ANDROID=y ++CONFIG_ANDROID_BINDER_IPC=y ++CONFIG_EXT2_FS=y ++CONFIG_EXT4_FS=y ++CONFIG_EXT4_FS_SECURITY=y ++CONFIG_QUOTA=y ++CONFIG_FUSE_FS=y ++CONFIG_CUSE=y ++CONFIG_MSDOS_FS=y ++CONFIG_VFAT_FS=y ++CONFIG_TMPFS=y ++CONFIG_TMPFS_POSIX_ACL=y ++# CONFIG_MISC_FILESYSTEMS is not set ++CONFIG_NFS_FS=y ++CONFIG_ROOT_NFS=y ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_ISO8859_1=y ++CONFIG_SECURITY=y ++CONFIG_SECURITY_NETWORK=y ++CONFIG_SECURITY_SELINUX=y ++CONFIG_DYNAMIC_DEBUG=y ++CONFIG_DEBUG_INFO=y ++CONFIG_DEBUG_FS=y ++CONFIG_MAGIC_SYSRQ=y ++CONFIG_PANIC_TIMEOUT=5 ++# CONFIG_SCHED_DEBUG is not set ++CONFIG_SCHEDSTATS=y ++# CONFIG_FTRACE is not set ++CONFIG_DMA_API_DEBUG=y ++CONFIG_ATOMIC64_SELFTEST=y +diff --git 
a/arch/arm64/configs/trusty_qemu_defconfig.fragment b/arch/arm64/configs/trusty_qemu_defconfig.fragment +new file mode 100644 +index 000000000000..166eef1797fd +--- /dev/null ++++ b/arch/arm64/configs/trusty_qemu_defconfig.fragment +@@ -0,0 +1,26 @@ ++# From goldfish ++CONFIG_VIRTIO_BLK=y ++CONFIG_VIRTIO_CONSOLE=y ++CONFIG_VIRTIO_INPUT=y ++CONFIG_VIRTIO_MMIO=y ++CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y ++CONFIG_VIRTIO_NET=y ++CONFIG_VIRTIO_PCI=y ++CONFIG_VIRTIO_PMEM=y ++# From Trusty ++CONFIG_TRUSTY=y ++CONFIG_DMA_API_DEBUG=y ++CONFIG_DYNAMIC_DEBUG=y ++CONFIG_PROVE_LOCKING=y ++CONFIG_DEBUG_ATOMIC_SLEEP=y ++CONFIG_SEMIHOSTING_EXIT=y ++CONFIG_E1000=y ++CONFIG_E1000E=y ++CONFIG_REBOOT_EMULATOR_EXIT=y ++CONFIG_DMABUF_HEAPS_SYSTEM=y ++# securefb test uses ION ++CONFIG_ION=y ++CONFIG_ION_SYSTEM_HEAP=y ++# LTO slows down build times considerably. Disable it. ++# CONFIG_LTO_CLANG is not set ++# CONFIG_LTO_CLANG_FULL is not set +diff --git a/drivers/Kconfig b/drivers/Kconfig +index dcecc9f6e33f..2e9abcc98126 100644 +--- a/drivers/Kconfig ++++ b/drivers/Kconfig +@@ -86,6 +86,8 @@ source "drivers/hwmon/Kconfig" + + source "drivers/thermal/Kconfig" + ++source "drivers/trusty/Kconfig" ++ + source "drivers/watchdog/Kconfig" + + source "drivers/ssb/Kconfig" +diff --git a/drivers/Makefile b/drivers/Makefile +index 576228037718..7d15799dbe77 100644 +--- a/drivers/Makefile ++++ b/drivers/Makefile +@@ -118,6 +118,7 @@ obj-$(CONFIG_W1) += w1/ + obj-y += power/ + obj-$(CONFIG_HWMON) += hwmon/ + obj-$(CONFIG_THERMAL) += thermal/ ++obj-$(CONFIG_TRUSTY) += trusty/ + obj-$(CONFIG_WATCHDOG) += watchdog/ + obj-$(CONFIG_MD) += md/ + obj-$(CONFIG_BT) += bluetooth/ +diff --git a/drivers/trusty/Kconfig b/drivers/trusty/Kconfig +new file mode 100644 +index 000000000000..fcde7f097acf +--- /dev/null ++++ b/drivers/trusty/Kconfig +@@ -0,0 +1,116 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++# ++# Trusty driver ++# ++ ++menu "Trusty driver" ++ ++config TRUSTY ++ tristate "Trusty core driver" ++ depends 
on ARM || ARM64 ++ help ++ Trusty is a secure OS that provides a Trusted Execution Environment ++ (TEE) for Android. Trusty runs on the same processor as Linux but is ++ isolated from the rest of the system by both hardware and software. ++ ++ This option enables the core part of the Linux kernel driver for ++ Trusty. This doesn't do much by itself; you'll need to enable some of ++ the sub-modules too. ++ ++ If you build this as a module, it will be called trusty-core. ++ ++if TRUSTY ++ ++config TRUSTY_IRQ ++ tristate "Trusty IRQ support" ++ default y ++ help ++ Enable forwarding of IRQs from Linux to Trusty. This module retrieves ++ from Trusty a list of IRQs that Trusty uses, and it registers handlers ++ for them which notify Trusty that the IRQ has been received. ++ ++ If you build this as a module, it will be called trusty-irq. ++ ++ Usually this is needed for Trusty to work, so say 'y' or 'm'. ++ ++config TRUSTY_LOG ++ tristate "Trusty log support" ++ default y ++ help ++ Print log messages generated by the secure OS to the Linux kernel log. ++ ++ While this module is loaded, messages are retrieved and printed after ++ each call into Trusty, and also during Linux kernel panics. ++ ++ If you build this as a module, it will be called trusty-log. ++ ++config TRUSTY_TEST ++ tristate "Trusty stdcall test" ++ default y ++ help ++ Allow running tests of the Trusty stdcall interface. Running these ++ tests is initiated by userspace writing to a sysfs file. ++ ++ This depends on having a test sevice running on the Trusty side. ++ ++ If you build this as a module, it will be called trusty-test. ++ ++config TRUSTY_VIRTIO ++ tristate "Trusty virtio support" ++ select VIRTIO ++ default y ++ help ++ Enable the Trusty virtio driver, which is responsible for management ++ and interaction with virtio devices exposed by Trusty. This driver ++ requests the virtio device descriptors from Trusty, then parses them ++ and adds the corresponding virtio devices. 
++ ++ If you build this as a module, it will be called trusty-virtio. ++ ++config TRUSTY_VIRTIO_IPC ++ tristate "Trusty Virtio IPC driver" ++ depends on TRUSTY_VIRTIO ++ default y ++ help ++ Enable support for communicating with Trusty services. ++ ++ If you build this as a module, it will be called trusty-ipc. ++ ++config TRUSTY_DMA_BUF_FFA_TAG ++ bool "Availability of trusty_dma_buf_get_ffa_tag" ++ default n ++ help ++ Whether trusty_dma_buf_get_ffa_tag is provided on this platform. ++ Providing this function will allow the platform to select what tag ++ should be passed to the SPM when attempting to transfer the buffer ++ to secure world. The value passed here is implementation defined and ++ may depend on your SPM. ++ ++ If set to N, a default implementation which returns 0 will be used. ++ ++config TRUSTY_DMA_BUF_SHARED_MEM_ID ++ bool "Availability of trusty_dma_buf_get_shared_mem_id" ++ default n ++ help ++ Whether trusty_dma_buf_get_shared_mem_id is provided on this platform. ++ Providing this function allows the platform to manage memory ++ transaction life cycle of DMA bufs independently of Trusty IPC driver. ++ The latter can query trusty_shared_mem_id_t value allocated for a ++ given DMA buf using trusty_dma_buf_get_shared_mem_id interface. ++ ++ If set to N, a default implementation which does not allocate any IDs ++ will be used. ++ ++config TRUSTY_CRASH_IS_PANIC ++ bool "When trusty panics, then panic the kernel" ++ help ++ This option will treat Trusty panics as fatal. This is useful if ++ your system cannot recover from Trusty panic/halt and you require ++ the system to reboot to recover. ++ ++ If N, it will contine to run the kernel, but trusty operations will ++ return errors. 
++ ++endif # TRUSTY ++ ++endmenu +diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile +new file mode 100644 +index 000000000000..2cf1cfccf97b +--- /dev/null ++++ b/drivers/trusty/Makefile +@@ -0,0 +1,14 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++# ++# Makefile for trusty components ++# ++ ++obj-$(CONFIG_TRUSTY) += trusty-core.o ++trusty-core-objs += trusty.o trusty-mem.o ++trusty-core-$(CONFIG_ARM) += trusty-smc-arm.o ++trusty-core-$(CONFIG_ARM64) += trusty-smc-arm64.o ++obj-$(CONFIG_TRUSTY_IRQ) += trusty-irq.o ++obj-$(CONFIG_TRUSTY_LOG) += trusty-log.o ++obj-$(CONFIG_TRUSTY_TEST) += trusty-test.o ++obj-$(CONFIG_TRUSTY_VIRTIO) += trusty-virtio.o ++obj-$(CONFIG_TRUSTY_VIRTIO_IPC) += trusty-ipc.o +diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c +new file mode 100644 +index 000000000000..82d6ddeb41f4 +--- /dev/null ++++ b/drivers/trusty/trusty-ipc.c +@@ -0,0 +1,2256 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2020 Google, Inc. 
++ */ ++ ++#include <linux/aio.h> ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/cdev.h> ++#include <linux/slab.h> ++#include <linux/fs.h> ++#include <linux/poll.h> ++#include <linux/idr.h> ++#include <linux/completion.h> ++#include <linux/dma-buf.h> ++#include <linux/sched.h> ++#include <linux/sched/signal.h> ++#include <linux/compat.h> ++#include <linux/uio.h> ++#include <linux/file.h> ++ ++#include <linux/virtio.h> ++#include <linux/virtio_ids.h> ++#include <linux/virtio_config.h> ++ ++#include <linux/trusty/trusty.h> ++#include <linux/trusty/trusty_ipc.h> ++ ++#include <uapi/linux/trusty/ipc.h> ++ ++#define MAX_DEVICES 4 ++ ++#define REPLY_TIMEOUT 5000 ++#define TXBUF_TIMEOUT 15000 ++ ++#define MAX_SRV_NAME_LEN 256 ++#define MAX_DEV_NAME_LEN 32 ++ ++#define DEFAULT_MSG_BUF_SIZE PAGE_SIZE ++#define DEFAULT_MSG_BUF_ALIGN PAGE_SIZE ++ ++#define TIPC_CTRL_ADDR 53 ++#define TIPC_ANY_ADDR 0xFFFFFFFF ++ ++#define TIPC_MIN_LOCAL_ADDR 1024 ++ ++#ifdef CONFIG_COMPAT ++#define TIPC_IOC32_CONNECT _IOW(TIPC_IOC_MAGIC, 0x80, compat_uptr_t) ++#endif ++ ++struct tipc_virtio_dev; ++ ++struct tipc_dev_config { ++ u32 msg_buf_max_size; ++ u32 msg_buf_alignment; ++ char dev_name[MAX_DEV_NAME_LEN]; ++} __packed; ++ ++struct tipc_shm { ++ trusty_shared_mem_id_t obj_id; ++ u64 size; ++ u64 tag; ++}; ++ ++struct tipc_msg_hdr { ++ u32 src; ++ u32 dst; ++ u16 reserved; ++ u16 shm_cnt; ++ u16 len; ++ u16 flags; ++ u8 data[]; ++} __packed; ++ ++enum tipc_ctrl_msg_types { ++ TIPC_CTRL_MSGTYPE_GO_ONLINE = 1, ++ TIPC_CTRL_MSGTYPE_GO_OFFLINE, ++ TIPC_CTRL_MSGTYPE_CONN_REQ, ++ TIPC_CTRL_MSGTYPE_CONN_RSP, ++ TIPC_CTRL_MSGTYPE_DISC_REQ, ++ TIPC_CTRL_MSGTYPE_RELEASE, ++}; ++ ++struct tipc_ctrl_msg { ++ u32 type; ++ u32 body_len; ++ u8 body[]; ++} __packed; ++ ++struct tipc_conn_req_body { ++ char name[MAX_SRV_NAME_LEN]; ++} __packed; ++ ++struct tipc_conn_rsp_body { ++ u32 target; ++ u32 status; ++ u32 remote; ++ u32 max_msg_size; ++ u32 max_msg_cnt; ++} __packed; ++ 
++struct tipc_disc_req_body { ++ u32 target; ++} __packed; ++ ++struct tipc_release_body { ++ trusty_shared_mem_id_t id; ++} __packed; ++ ++struct tipc_cdev_node { ++ struct cdev cdev; ++ struct device *dev; ++ unsigned int minor; ++}; ++ ++enum tipc_device_state { ++ VDS_OFFLINE = 0, ++ VDS_ONLINE, ++ VDS_DEAD, ++}; ++ ++struct tipc_virtio_dev { ++ struct kref refcount; ++ struct mutex lock; /* protects access to this device */ ++ struct virtio_device *vdev; ++ struct virtqueue *rxvq; ++ struct virtqueue *txvq; ++ unsigned int msg_buf_cnt; ++ unsigned int msg_buf_max_cnt; ++ size_t msg_buf_max_sz; ++ unsigned int free_msg_buf_cnt; ++ struct list_head free_buf_list; ++ wait_queue_head_t sendq; ++ struct idr addr_idr; ++ enum tipc_device_state state; ++ struct tipc_cdev_node cdev_node; ++ /* protects shared_handles, dev lock never acquired while held */ ++ struct mutex shared_handles_lock; ++ struct rb_root shared_handles; ++ char cdev_name[MAX_DEV_NAME_LEN]; ++}; ++ ++enum tipc_chan_state { ++ TIPC_DISCONNECTED = 0, ++ TIPC_CONNECTING, ++ TIPC_CONNECTED, ++ TIPC_STALE, ++}; ++ ++struct tipc_chan { ++ struct mutex lock; /* protects channel state */ ++ struct kref refcount; ++ enum tipc_chan_state state; ++ struct tipc_virtio_dev *vds; ++ const struct tipc_chan_ops *ops; ++ void *ops_arg; ++ u32 remote; ++ u32 local; ++ u32 max_msg_size; ++ u32 max_msg_cnt; ++ char srv_name[MAX_SRV_NAME_LEN]; ++}; ++ ++struct tipc_shared_handle { ++ struct rb_node node; ++ struct tipc_shm tipc; ++ struct tipc_virtio_dev *vds; ++ struct dma_buf *dma_buf; ++ bool shared; ++ /* ++ * Following fields are only used if dma_buf does not own a ++ * trusty_shared_mem_id_t. 
++ */ ++ struct dma_buf_attachment *attach; ++ struct sg_table *sgt; ++}; ++ ++static struct class *tipc_class; ++static unsigned int tipc_major; ++ ++static struct virtio_device *default_vdev; ++ ++static DEFINE_IDR(tipc_devices); ++static DEFINE_MUTEX(tipc_devices_lock); ++ ++static int _match_any(int id, void *p, void *data) ++{ ++ return id; ++} ++ ++static int _match_data(int id, void *p, void *data) ++{ ++ return (p == data); ++} ++ ++static void *_alloc_shareable_mem(size_t sz, gfp_t gfp) ++{ ++ return alloc_pages_exact(sz, gfp); ++} ++ ++static void _free_shareable_mem(size_t sz, void *va) ++{ ++ free_pages_exact(va, sz); ++} ++ ++static struct tipc_msg_buf *vds_alloc_msg_buf(struct tipc_virtio_dev *vds, ++ bool share_write) ++{ ++ int ret; ++ struct tipc_msg_buf *mb; ++ size_t sz = vds->msg_buf_max_sz; ++ pgprot_t pgprot = share_write ? PAGE_KERNEL : PAGE_KERNEL_RO; ++ ++ /* allocate tracking structure */ ++ mb = kzalloc(sizeof(struct tipc_msg_buf), GFP_KERNEL); ++ if (!mb) ++ return NULL; ++ ++ /* allocate buffer that can be shared with secure world */ ++ mb->buf_va = _alloc_shareable_mem(sz, GFP_KERNEL); ++ if (!mb->buf_va) ++ goto err_alloc; ++ ++ sg_init_one(&mb->sg, mb->buf_va, sz); ++ ret = trusty_share_memory_compat(vds->vdev->dev.parent->parent, ++ &mb->buf_id, &mb->sg, 1, pgprot); ++ if (ret) { ++ dev_err(&vds->vdev->dev, "trusty_share_memory failed: %d\n", ++ ret); ++ goto err_share; ++ } ++ ++ mb->buf_sz = sz; ++ mb->shm_cnt = 0; ++ ++ return mb; ++ ++err_share: ++ _free_shareable_mem(sz, mb->buf_va); ++err_alloc: ++ kfree(mb); ++ return NULL; ++} ++ ++static void vds_free_msg_buf(struct tipc_virtio_dev *vds, ++ struct tipc_msg_buf *mb) ++{ ++ int ret; ++ ++ ret = trusty_reclaim_memory(vds->vdev->dev.parent->parent, mb->buf_id, ++ &mb->sg, 1); ++ if (WARN_ON(ret)) { ++ dev_err(&vds->vdev->dev, ++ "trusty_revoke_memory failed: %d txbuf %lld\n", ++ ret, mb->buf_id); ++ ++ /* ++ * It is not safe to free this memory if trusty_revoke_memory ++ * 
fails. Leak it in that case. ++ */ ++ } else { ++ _free_shareable_mem(mb->buf_sz, mb->buf_va); ++ } ++ kfree(mb); ++} ++ ++static void vds_free_msg_buf_list(struct tipc_virtio_dev *vds, ++ struct list_head *list) ++{ ++ struct tipc_msg_buf *mb = NULL; ++ ++ mb = list_first_entry_or_null(list, struct tipc_msg_buf, node); ++ while (mb) { ++ list_del(&mb->node); ++ vds_free_msg_buf(vds, mb); ++ mb = list_first_entry_or_null(list, struct tipc_msg_buf, node); ++ } ++} ++ ++static inline void mb_reset(struct tipc_msg_buf *mb) ++{ ++ mb->wpos = 0; ++ mb->rpos = 0; ++} ++ ++static inline void mb_reset_read(struct tipc_msg_buf *mb) ++{ ++ mb->rpos = 0; ++} ++ ++static void _free_vds(struct kref *kref) ++{ ++ struct tipc_virtio_dev *vds = ++ container_of(kref, struct tipc_virtio_dev, refcount); ++ /* ++ * If this WARN triggers, we're leaking remote memory references. ++ * ++ * No need to lock shared_handles_lock. All references to this lock ++ * should already be gone by this point, since we are freeing it in this ++ * function. 
++ */ ++ WARN_ON(!RB_EMPTY_ROOT(&vds->shared_handles)); ++ kfree(vds); ++} ++ ++static void _free_chan(struct kref *kref) ++{ ++ struct tipc_chan *ch = container_of(kref, struct tipc_chan, refcount); ++ ++ if (ch->ops && ch->ops->handle_release) ++ ch->ops->handle_release(ch->ops_arg); ++ ++ kref_put(&ch->vds->refcount, _free_vds); ++ kfree(ch); ++} ++ ++static bool _put_txbuf_locked(struct tipc_virtio_dev *vds, ++ struct tipc_msg_buf *mb) ++{ ++ list_add_tail(&mb->node, &vds->free_buf_list); ++ return vds->free_msg_buf_cnt++ == 0; ++} ++ ++static struct tipc_msg_buf *_get_txbuf_locked(struct tipc_virtio_dev *vds) ++{ ++ struct tipc_msg_buf *mb; ++ ++ if (vds->state != VDS_ONLINE) ++ return ERR_PTR(-ENODEV); ++ ++ if (vds->free_msg_buf_cnt) { ++ /* take it out of free list */ ++ mb = list_first_entry(&vds->free_buf_list, ++ struct tipc_msg_buf, node); ++ list_del(&mb->node); ++ mb->shm_cnt = 0; ++ vds->free_msg_buf_cnt--; ++ } else { ++ if (vds->msg_buf_cnt >= vds->msg_buf_max_cnt) ++ return ERR_PTR(-EAGAIN); ++ ++ /* try to allocate it */ ++ mb = vds_alloc_msg_buf(vds, false); ++ if (!mb) ++ return ERR_PTR(-ENOMEM); ++ ++ vds->msg_buf_cnt++; ++ } ++ return mb; ++} ++ ++static struct tipc_msg_buf *_vds_get_txbuf(struct tipc_virtio_dev *vds) ++{ ++ struct tipc_msg_buf *mb; ++ ++ mutex_lock(&vds->lock); ++ mb = _get_txbuf_locked(vds); ++ mutex_unlock(&vds->lock); ++ ++ return mb; ++} ++ ++static void vds_put_txbuf(struct tipc_virtio_dev *vds, struct tipc_msg_buf *mb) ++{ ++ mutex_lock(&vds->lock); ++ _put_txbuf_locked(vds, mb); ++ wake_up_interruptible(&vds->sendq); ++ mutex_unlock(&vds->lock); ++} ++ ++static struct tipc_msg_buf *vds_get_txbuf(struct tipc_virtio_dev *vds, ++ long timeout) ++{ ++ struct tipc_msg_buf *mb; ++ ++ mb = _vds_get_txbuf(vds); ++ ++ if ((PTR_ERR(mb) == -EAGAIN) && timeout) { ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); ++ ++ timeout = msecs_to_jiffies(timeout); ++ add_wait_queue(&vds->sendq, &wait); ++ for (;;) { ++ timeout = 
wait_woken(&wait, TASK_INTERRUPTIBLE, ++ timeout); ++ if (!timeout) { ++ mb = ERR_PTR(-ETIMEDOUT); ++ break; ++ } ++ ++ if (signal_pending(current)) { ++ mb = ERR_PTR(-ERESTARTSYS); ++ break; ++ } ++ ++ mb = _vds_get_txbuf(vds); ++ if (PTR_ERR(mb) != -EAGAIN) ++ break; ++ } ++ remove_wait_queue(&vds->sendq, &wait); ++ } ++ ++ if (IS_ERR(mb)) ++ return mb; ++ ++ if (WARN_ON(!mb)) ++ return ERR_PTR(-EINVAL); ++ ++ /* reset and reserve space for message header */ ++ mb_reset(mb); ++ mb_put_data(mb, sizeof(struct tipc_msg_hdr)); ++ ++ return mb; ++} ++ ++static int vds_queue_txbuf(struct tipc_virtio_dev *vds, ++ struct tipc_msg_buf *mb) ++{ ++ int err; ++ struct scatterlist sg; ++ bool need_notify = false; ++ ++ mutex_lock(&vds->lock); ++ if (vds->state == VDS_ONLINE) { ++ sg_init_one(&sg, mb, mb->wpos); ++ err = virtqueue_add_outbuf(vds->txvq, &sg, 1, mb, GFP_KERNEL); ++ need_notify = virtqueue_kick_prepare(vds->txvq); ++ } else { ++ err = -ENODEV; ++ } ++ mutex_unlock(&vds->lock); ++ ++ if (need_notify) ++ virtqueue_notify(vds->txvq); ++ ++ return err; ++} ++ ++static int vds_add_channel(struct tipc_virtio_dev *vds, ++ struct tipc_chan *chan) ++{ ++ int ret; ++ ++ mutex_lock(&vds->lock); ++ if (vds->state == VDS_ONLINE) { ++ ret = idr_alloc(&vds->addr_idr, chan, ++ TIPC_MIN_LOCAL_ADDR, TIPC_ANY_ADDR - 1, ++ GFP_KERNEL); ++ if (ret > 0) { ++ chan->local = ret; ++ kref_get(&chan->refcount); ++ ret = 0; ++ } ++ } else { ++ ret = -EINVAL; ++ } ++ mutex_unlock(&vds->lock); ++ ++ return ret; ++} ++ ++static void vds_del_channel(struct tipc_virtio_dev *vds, ++ struct tipc_chan *chan) ++{ ++ mutex_lock(&vds->lock); ++ if (chan->local) { ++ idr_remove(&vds->addr_idr, chan->local); ++ chan->local = 0; ++ chan->remote = 0; ++ kref_put(&chan->refcount, _free_chan); ++ } ++ mutex_unlock(&vds->lock); ++} ++ ++static struct tipc_chan *vds_lookup_channel(struct tipc_virtio_dev *vds, ++ u32 addr) ++{ ++ int id; ++ struct tipc_chan *chan = NULL; ++ ++ mutex_lock(&vds->lock); ++ if 
(addr == TIPC_ANY_ADDR) { ++ id = idr_for_each(&vds->addr_idr, _match_any, NULL); ++ if (id > 0) ++ chan = idr_find(&vds->addr_idr, id); ++ } else { ++ chan = idr_find(&vds->addr_idr, addr); ++ } ++ if (chan) ++ kref_get(&chan->refcount); ++ mutex_unlock(&vds->lock); ++ ++ return chan; ++} ++ ++static struct tipc_chan *vds_create_channel(struct tipc_virtio_dev *vds, ++ const struct tipc_chan_ops *ops, ++ void *ops_arg) ++{ ++ int ret; ++ struct tipc_chan *chan = NULL; ++ ++ if (!vds) ++ return ERR_PTR(-ENOENT); ++ ++ if (!ops) ++ return ERR_PTR(-EINVAL); ++ ++ chan = kzalloc(sizeof(*chan), GFP_KERNEL); ++ if (!chan) ++ return ERR_PTR(-ENOMEM); ++ ++ kref_get(&vds->refcount); ++ chan->vds = vds; ++ chan->ops = ops; ++ chan->ops_arg = ops_arg; ++ mutex_init(&chan->lock); ++ kref_init(&chan->refcount); ++ chan->state = TIPC_DISCONNECTED; ++ ++ ret = vds_add_channel(vds, chan); ++ if (ret) { ++ kfree(chan); ++ kref_put(&vds->refcount, _free_vds); ++ return ERR_PTR(ret); ++ } ++ ++ return chan; ++} ++ ++static void fill_msg_hdr(struct tipc_msg_buf *mb, u32 src, u32 dst) ++{ ++ struct tipc_msg_hdr *hdr = mb_get_data(mb, sizeof(*hdr)); ++ ++ hdr->src = src; ++ hdr->dst = dst; ++ hdr->len = mb_avail_data(mb); ++ hdr->flags = 0; ++ hdr->shm_cnt = mb->shm_cnt; ++ hdr->reserved = 0; ++} ++ ++static int tipc_shared_handle_new(struct tipc_shared_handle **shared_handle, ++ struct tipc_virtio_dev *vds) ++{ ++ struct tipc_shared_handle *out = kzalloc(sizeof(*out), GFP_KERNEL); ++ ++ if (!out) ++ return -ENOMEM; ++ ++ out->vds = vds; ++ *shared_handle = out; ++ ++ return 0; ++} ++ ++static struct device *tipc_shared_handle_dev(struct tipc_shared_handle ++ *shared_handle) ++{ ++ return shared_handle->vds->vdev->dev.parent->parent; ++} ++ ++static bool is_same_memory_region(struct tipc_shared_handle *h1, ++ struct tipc_shared_handle *h2) ++{ ++ return h1->tipc.obj_id == h2->tipc.obj_id && ++ h1->tipc.size == h2->tipc.size && ++ h1->tipc.tag == h2->tipc.tag && ++ h1->dma_buf == 
h2->dma_buf && ++ h1->shared == h2->shared; ++} ++ ++static bool dma_buf_owns_shared_mem_id(struct tipc_shared_handle *h) ++{ ++ /* h->shared is true only if dma_buf did not own an shared memory ID */ ++ return !h->shared; ++} ++ ++static void tipc_shared_handle_register(struct tipc_shared_handle ++ *new_handle) ++{ ++ struct tipc_virtio_dev *vds = new_handle->vds; ++ struct rb_node **new; ++ struct rb_node *parent = NULL; ++ ++ mutex_lock(&vds->shared_handles_lock); ++ ++ new = &vds->shared_handles.rb_node; ++ while (*new) { ++ struct tipc_shared_handle *handle = ++ rb_entry(*new, struct tipc_shared_handle, node); ++ parent = *new; ++ /* ++ * An obj_id can be registered multiple times if it's owned by a ++ * dma_buf, because in this case we use the same obj_id across ++ * multiple memory transfer operations. ++ */ ++ if (handle->tipc.obj_id == new_handle->tipc.obj_id) { ++ if (dma_buf_owns_shared_mem_id(new_handle)) { ++ WARN_ON(!is_same_memory_region(handle, ++ new_handle)); ++ } else { ++ WARN(1, "This handle is already registered"); ++ goto already_registered; ++ } ++ } ++ ++ if (handle->tipc.obj_id > new_handle->tipc.obj_id) ++ new = &((*new)->rb_left); ++ else ++ new = &((*new)->rb_right); ++ } ++ ++ rb_link_node(&new_handle->node, parent, new); ++ rb_insert_color(&new_handle->node, &vds->shared_handles); ++ ++already_registered: ++ mutex_unlock(&vds->shared_handles_lock); ++} ++ ++static struct tipc_shared_handle *tipc_shared_handle_take(struct tipc_virtio_dev ++ *vds, ++ trusty_shared_mem_id_t ++ obj_id) ++{ ++ struct rb_node *node; ++ struct tipc_shared_handle *out = NULL; ++ ++ mutex_lock(&vds->shared_handles_lock); ++ ++ node = vds->shared_handles.rb_node; ++ while (node) { ++ struct tipc_shared_handle *handle = ++ rb_entry(node, struct tipc_shared_handle, node); ++ if (obj_id == handle->tipc.obj_id) { ++ rb_erase(node, &vds->shared_handles); ++ out = handle; ++ break; ++ } else if (obj_id < handle->tipc.obj_id) { ++ node = node->rb_left; ++ } else { ++ 
node = node->rb_right; ++ } ++ } ++ ++ mutex_unlock(&vds->shared_handles_lock); ++ ++ return out; ++} ++ ++static int tipc_shared_handle_drop(struct tipc_shared_handle *shared_handle) ++{ ++ int ret; ++ struct tipc_virtio_dev *vds = shared_handle->vds; ++ struct device *dev = tipc_shared_handle_dev(shared_handle); ++ ++ if (shared_handle->shared) { ++ /* ++ * If this warning fires, it means this shared handle was still ++ * in the set of active handles. This shouldn't happen (calling ++ * code should ensure it is out if the tree) but this serves as ++ * an extra check before it is released. ++ * ++ * However, the take itself should clean this incorrect state up ++ * by removing the handle from the tree. ++ * ++ * This warning is only applicable when registering a handle ++ * multiple times is not allowed, i.e. when dma_buf doesn't own ++ * the handle. ++ */ ++ WARN_ON(tipc_shared_handle_take(vds, ++ shared_handle->tipc.obj_id)); ++ ++ ret = trusty_reclaim_memory(dev, ++ shared_handle->tipc.obj_id, ++ shared_handle->sgt->sgl, ++ shared_handle->sgt->orig_nents); ++ if (ret) { ++ /* ++ * We can't safely release this, it may still be in ++ * use outside Linux. 
++ */ ++ dev_warn(dev, "Failed to drop handle, leaking...\n"); ++ return ret; ++ } ++ } ++ ++ if (shared_handle->sgt) ++ dma_buf_unmap_attachment(shared_handle->attach, ++ shared_handle->sgt, DMA_BIDIRECTIONAL); ++ if (shared_handle->attach) ++ dma_buf_detach(shared_handle->dma_buf, shared_handle->attach); ++ if (shared_handle->dma_buf) ++ dma_buf_put(shared_handle->dma_buf); ++ ++ kfree(shared_handle); ++ ++ return 0; ++} ++ ++/*****************************************************************************/ ++ ++struct tipc_chan *tipc_create_channel(struct device *dev, ++ const struct tipc_chan_ops *ops, ++ void *ops_arg) ++{ ++ struct virtio_device *vd; ++ struct tipc_chan *chan; ++ struct tipc_virtio_dev *vds; ++ ++ mutex_lock(&tipc_devices_lock); ++ if (dev) { ++ vd = container_of(dev, struct virtio_device, dev); ++ } else { ++ vd = default_vdev; ++ if (!vd) { ++ mutex_unlock(&tipc_devices_lock); ++ return ERR_PTR(-ENOENT); ++ } ++ } ++ vds = vd->priv; ++ kref_get(&vds->refcount); ++ mutex_unlock(&tipc_devices_lock); ++ ++ chan = vds_create_channel(vds, ops, ops_arg); ++ kref_put(&vds->refcount, _free_vds); ++ return chan; ++} ++EXPORT_SYMBOL(tipc_create_channel); ++ ++struct tipc_msg_buf *tipc_chan_get_rxbuf(struct tipc_chan *chan) ++{ ++ return vds_alloc_msg_buf(chan->vds, true); ++} ++EXPORT_SYMBOL(tipc_chan_get_rxbuf); ++ ++void tipc_chan_put_rxbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb) ++{ ++ vds_free_msg_buf(chan->vds, mb); ++} ++EXPORT_SYMBOL(tipc_chan_put_rxbuf); ++ ++struct tipc_msg_buf *tipc_chan_get_txbuf_timeout(struct tipc_chan *chan, ++ long timeout) ++{ ++ return vds_get_txbuf(chan->vds, timeout); ++} ++EXPORT_SYMBOL(tipc_chan_get_txbuf_timeout); ++ ++void tipc_chan_put_txbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb) ++{ ++ vds_put_txbuf(chan->vds, mb); ++} ++EXPORT_SYMBOL(tipc_chan_put_txbuf); ++ ++int tipc_chan_queue_msg(struct tipc_chan *chan, struct tipc_msg_buf *mb) ++{ ++ int err; ++ ++ mutex_lock(&chan->lock); ++ switch 
(chan->state) { ++ case TIPC_CONNECTED: ++ fill_msg_hdr(mb, chan->local, chan->remote); ++ err = vds_queue_txbuf(chan->vds, mb); ++ if (err) { ++ /* this should never happen */ ++ dev_err(&chan->vds->vdev->dev, ++ "%s: failed to queue tx buffer (%d)\n", ++ __func__, err); ++ } ++ break; ++ case TIPC_DISCONNECTED: ++ case TIPC_CONNECTING: ++ err = -ENOTCONN; ++ break; ++ case TIPC_STALE: ++ err = -ESHUTDOWN; ++ break; ++ default: ++ err = -EBADFD; ++ dev_err(&chan->vds->vdev->dev, ++ "%s: unexpected channel state %d\n", ++ __func__, chan->state); ++ } ++ mutex_unlock(&chan->lock); ++ return err; ++} ++EXPORT_SYMBOL(tipc_chan_queue_msg); ++ ++ ++int tipc_chan_connect(struct tipc_chan *chan, const char *name) ++{ ++ int err; ++ struct tipc_ctrl_msg *msg; ++ struct tipc_conn_req_body *body; ++ struct tipc_msg_buf *txbuf; ++ ++ txbuf = vds_get_txbuf(chan->vds, TXBUF_TIMEOUT); ++ if (IS_ERR(txbuf)) ++ return PTR_ERR(txbuf); ++ ++ /* reserve space for connection request control message */ ++ msg = mb_put_data(txbuf, sizeof(*msg) + sizeof(*body)); ++ body = (struct tipc_conn_req_body *)msg->body; ++ ++ /* fill message */ ++ msg->type = TIPC_CTRL_MSGTYPE_CONN_REQ; ++ msg->body_len = sizeof(*body); ++ ++ strncpy(body->name, name, sizeof(body->name)); ++ body->name[sizeof(body->name)-1] = '\0'; ++ ++ mutex_lock(&chan->lock); ++ switch (chan->state) { ++ case TIPC_DISCONNECTED: ++ /* save service name we are connecting to */ ++ strcpy(chan->srv_name, body->name); ++ ++ fill_msg_hdr(txbuf, chan->local, TIPC_CTRL_ADDR); ++ err = vds_queue_txbuf(chan->vds, txbuf); ++ if (err) { ++ /* this should never happen */ ++ dev_err(&chan->vds->vdev->dev, ++ "%s: failed to queue tx buffer (%d)\n", ++ __func__, err); ++ } else { ++ chan->state = TIPC_CONNECTING; ++ txbuf = NULL; /* prevents discarding buffer */ ++ } ++ break; ++ case TIPC_CONNECTED: ++ case TIPC_CONNECTING: ++ /* check if we are trying to connect to the same service */ ++ if (strcmp(chan->srv_name, body->name) == 0) ++ err = 
0; ++ else ++ if (chan->state == TIPC_CONNECTING) ++ err = -EALREADY; /* in progress */ ++ else ++ err = -EISCONN; /* already connected */ ++ break; ++ ++ case TIPC_STALE: ++ err = -ESHUTDOWN; ++ break; ++ default: ++ err = -EBADFD; ++ dev_err(&chan->vds->vdev->dev, ++ "%s: unexpected channel state %d\n", ++ __func__, chan->state); ++ break; ++ } ++ mutex_unlock(&chan->lock); ++ ++ if (txbuf) ++ tipc_chan_put_txbuf(chan, txbuf); /* discard it */ ++ ++ return err; ++} ++EXPORT_SYMBOL(tipc_chan_connect); ++ ++int tipc_chan_shutdown(struct tipc_chan *chan) ++{ ++ int err; ++ struct tipc_ctrl_msg *msg; ++ struct tipc_disc_req_body *body; ++ struct tipc_msg_buf *txbuf = NULL; ++ ++ /* get tx buffer */ ++ txbuf = vds_get_txbuf(chan->vds, TXBUF_TIMEOUT); ++ if (IS_ERR(txbuf)) ++ return PTR_ERR(txbuf); ++ ++ mutex_lock(&chan->lock); ++ if (chan->state == TIPC_CONNECTED || chan->state == TIPC_CONNECTING) { ++ /* reserve space for disconnect request control message */ ++ msg = mb_put_data(txbuf, sizeof(*msg) + sizeof(*body)); ++ body = (struct tipc_disc_req_body *)msg->body; ++ ++ msg->type = TIPC_CTRL_MSGTYPE_DISC_REQ; ++ msg->body_len = sizeof(*body); ++ body->target = chan->remote; ++ ++ fill_msg_hdr(txbuf, chan->local, TIPC_CTRL_ADDR); ++ err = vds_queue_txbuf(chan->vds, txbuf); ++ if (err) { ++ /* this should never happen */ ++ dev_err(&chan->vds->vdev->dev, ++ "%s: failed to queue tx buffer (%d)\n", ++ __func__, err); ++ } ++ } else { ++ err = -ENOTCONN; ++ } ++ chan->state = TIPC_STALE; ++ mutex_unlock(&chan->lock); ++ ++ if (err) { ++ /* release buffer */ ++ tipc_chan_put_txbuf(chan, txbuf); ++ } ++ ++ return err; ++} ++EXPORT_SYMBOL(tipc_chan_shutdown); ++ ++void tipc_chan_destroy(struct tipc_chan *chan) ++{ ++ vds_del_channel(chan->vds, chan); ++ kref_put(&chan->refcount, _free_chan); ++} ++EXPORT_SYMBOL(tipc_chan_destroy); ++ ++/***************************************************************************/ ++ ++struct tipc_dn_chan { ++ int state; ++ struct mutex 
lock; /* protects rx_msg_queue list and channel state */
++	struct tipc_chan *chan;
++	wait_queue_head_t readq;
++	struct completion reply_comp;
++	struct list_head rx_msg_queue;
++};
++
++static int dn_wait_for_reply(struct tipc_dn_chan *dn, int timeout)
++{
++	int ret;
++
++	ret = wait_for_completion_interruptible_timeout(&dn->reply_comp,
++					msecs_to_jiffies(timeout));
++	if (ret < 0)
++		return ret;
++
++	mutex_lock(&dn->lock);
++	if (!ret) {
++		/* no reply from remote */
++		dn->state = TIPC_STALE;
++		ret = -ETIMEDOUT;
++	} else {
++		/* got reply */
++		if (dn->state == TIPC_CONNECTED)
++			ret = 0;
++		else if (dn->state == TIPC_DISCONNECTED) {
++			if (!list_empty(&dn->rx_msg_queue))
++				ret = 0;
++			else
++				ret = -ENOTCONN;
++		} else
++			ret = -EIO;
++	}
++	mutex_unlock(&dn->lock);
++
++	return ret;
++}
++
++static struct tipc_msg_buf *dn_handle_msg(void *data,
++					  struct tipc_msg_buf *rxbuf)
++{
++	struct tipc_dn_chan *dn = data;
++	struct tipc_msg_buf *newbuf = rxbuf;
++
++	mutex_lock(&dn->lock);
++	if (dn->state == TIPC_CONNECTED) {
++		/* get new buffer */
++		newbuf = tipc_chan_get_rxbuf(dn->chan);
++		if (newbuf) {
++			/* queue an old buffer and return a new one */
++			list_add_tail(&rxbuf->node, &dn->rx_msg_queue);
++			wake_up_interruptible(&dn->readq);
++		} else {
++			/*
++			 * return an old buffer effectively discarding
++			 * incoming message
++			 */
++			dev_err(&dn->chan->vds->vdev->dev,
++				"%s: discard incoming message\n", __func__);
++			newbuf = rxbuf;
++		}
++	}
++	mutex_unlock(&dn->lock);
++
++	return newbuf;
++}
++
++static void dn_connected(struct tipc_dn_chan *dn)
++{
++	mutex_lock(&dn->lock);
++	dn->state = TIPC_CONNECTED;
++
++	/* complete all pending */
++	complete(&dn->reply_comp);
++
++	mutex_unlock(&dn->lock);
++}
++
++static void dn_disconnected(struct tipc_dn_chan *dn)
++{
++	mutex_lock(&dn->lock);
++	dn->state = TIPC_DISCONNECTED;
++
++	/* complete all pending */
++	complete(&dn->reply_comp);
++
++	/* wakeup all readers */
++	
wake_up_interruptible_all(&dn->readq); ++ ++ mutex_unlock(&dn->lock); ++} ++ ++static void dn_shutdown(struct tipc_dn_chan *dn) ++{ ++ mutex_lock(&dn->lock); ++ ++ /* set state to STALE */ ++ dn->state = TIPC_STALE; ++ ++ /* complete all pending */ ++ complete(&dn->reply_comp); ++ ++ /* wakeup all readers */ ++ wake_up_interruptible_all(&dn->readq); ++ ++ mutex_unlock(&dn->lock); ++} ++ ++static void dn_handle_event(void *data, int event) ++{ ++ struct tipc_dn_chan *dn = data; ++ ++ switch (event) { ++ case TIPC_CHANNEL_SHUTDOWN: ++ dn_shutdown(dn); ++ break; ++ ++ case TIPC_CHANNEL_DISCONNECTED: ++ dn_disconnected(dn); ++ break; ++ ++ case TIPC_CHANNEL_CONNECTED: ++ dn_connected(dn); ++ break; ++ ++ default: ++ dev_err(&dn->chan->vds->vdev->dev, ++ "%s: unhandled event %d\n", __func__, event); ++ break; ++ } ++} ++ ++static void dn_handle_release(void *data) ++{ ++ kfree(data); ++} ++ ++static const struct tipc_chan_ops _dn_ops = { ++ .handle_msg = dn_handle_msg, ++ .handle_event = dn_handle_event, ++ .handle_release = dn_handle_release, ++}; ++ ++#define cdev_to_cdn(c) container_of((c), struct tipc_cdev_node, cdev) ++#define cdn_to_vds(cdn) container_of((cdn), struct tipc_virtio_dev, cdev_node) ++ ++static struct tipc_virtio_dev *_dn_lookup_vds(struct tipc_cdev_node *cdn) ++{ ++ int ret; ++ struct tipc_virtio_dev *vds = NULL; ++ ++ mutex_lock(&tipc_devices_lock); ++ ret = idr_for_each(&tipc_devices, _match_data, cdn); ++ if (ret) { ++ vds = cdn_to_vds(cdn); ++ kref_get(&vds->refcount); ++ } ++ mutex_unlock(&tipc_devices_lock); ++ return vds; ++} ++ ++static int tipc_open(struct inode *inode, struct file *filp) ++{ ++ int ret; ++ struct tipc_virtio_dev *vds; ++ struct tipc_dn_chan *dn; ++ struct tipc_cdev_node *cdn = cdev_to_cdn(inode->i_cdev); ++ ++ vds = _dn_lookup_vds(cdn); ++ if (!vds) { ++ ret = -ENOENT; ++ goto err_vds_lookup; ++ } ++ ++ dn = kzalloc(sizeof(*dn), GFP_KERNEL); ++ if (!dn) { ++ ret = -ENOMEM; ++ goto err_alloc_chan; ++ } ++ ++ 
mutex_init(&dn->lock); ++ init_waitqueue_head(&dn->readq); ++ init_completion(&dn->reply_comp); ++ INIT_LIST_HEAD(&dn->rx_msg_queue); ++ ++ dn->state = TIPC_DISCONNECTED; ++ ++ dn->chan = vds_create_channel(vds, &_dn_ops, dn); ++ if (IS_ERR(dn->chan)) { ++ ret = PTR_ERR(dn->chan); ++ goto err_create_chan; ++ } ++ ++ filp->private_data = dn; ++ kref_put(&vds->refcount, _free_vds); ++ return 0; ++ ++err_create_chan: ++ kfree(dn); ++err_alloc_chan: ++ kref_put(&vds->refcount, _free_vds); ++err_vds_lookup: ++ return ret; ++} ++ ++ ++static int dn_connect_ioctl(struct tipc_dn_chan *dn, char __user *usr_name) ++{ ++ int ret; ++ char name[MAX_SRV_NAME_LEN]; ++ ++ /* copy in service name from user space */ ++ ret = strncpy_from_user(name, usr_name, sizeof(name)); ++ if (ret < 0) ++ return ret; ++ if (ret == sizeof(name)) ++ return -ENAMETOOLONG; ++ ++ /* send connect request */ ++ ret = tipc_chan_connect(dn->chan, name); ++ if (ret) ++ return ret; ++ ++ /* and wait for reply */ ++ return dn_wait_for_reply(dn, REPLY_TIMEOUT); ++} ++ ++static int dn_share_fd(struct tipc_dn_chan *dn, int fd, ++ enum transfer_kind transfer_kind, ++ struct tipc_shared_handle **out) ++{ ++ int ret = 0; ++ struct tipc_shared_handle *shared_handle = NULL; ++ struct file *file = NULL; ++ struct device *dev = &dn->chan->vds->vdev->dev; ++ bool writable = false; ++ pgprot_t prot; ++ u64 tag = 0; ++ trusty_shared_mem_id_t mem_id; ++ bool lend; ++ ++ if (dn->state != TIPC_CONNECTED) { ++ dev_dbg(dev, "Tried to share fd while not connected\n"); ++ return -ENOTCONN; ++ } ++ ++ file = fget(fd); ++ if (!file) { ++ dev_dbg(dev, "Invalid fd (%d)\n", fd); ++ return -EBADF; ++ } ++ ++ if (!(file->f_mode & FMODE_READ)) { ++ dev_dbg(dev, "Cannot create write-only mapping\n"); ++ fput(file); ++ return -EACCES; ++ } ++ ++ writable = file->f_mode & FMODE_WRITE; ++ prot = writable ? 
PAGE_KERNEL : PAGE_KERNEL_RO; ++ fput(file); ++ file = NULL; ++ ++ ret = tipc_shared_handle_new(&shared_handle, dn->chan->vds); ++ if (ret) ++ return ret; ++ ++ shared_handle->dma_buf = dma_buf_get(fd); ++ if (IS_ERR(shared_handle->dma_buf)) { ++ ret = PTR_ERR(shared_handle->dma_buf); ++ shared_handle->dma_buf = NULL; ++ dev_dbg(dev, "Unable to get dma buf from fd (%d)\n", ret); ++ goto cleanup_handle; ++ } ++ ++ tag = trusty_dma_buf_get_ffa_tag(shared_handle->dma_buf); ++ ret = trusty_dma_buf_get_shared_mem_id(shared_handle->dma_buf, &mem_id); ++ /* ++ * Buffers with a preallocated mem_id should only be sent to Trusty ++ * using TRUSTY_SEND_SECURE. And conversely, TRUSTY_SEND_SECURE should ++ * only be used to send buffers with preallcoated mem_id. ++ */ ++ if (!ret) { ++ /* Use shared memory ID owned by dma_buf */ ++ /* TODO: Enforce transfer_kind == TRUSTY_SEND_SECURE */ ++ WARN_ONCE(transfer_kind != TRUSTY_SEND_SECURE, ++ "Use TRUSTY_SEND_SECURE instead"); ++ goto mem_id_allocated; ++ } ++ ++ if (ret != -ENODATA) { ++ dev_err(dev, "dma_buf can't be transferred (%d)\n", ret); ++ goto cleanup_handle; ++ } ++ ++ if (transfer_kind == TRUSTY_SEND_SECURE) { ++ dev_err(dev, "No mem ID for TRUSTY_SEND_SECURE\n"); ++ goto cleanup_handle; ++ } ++ lend = (transfer_kind == TRUSTY_LEND); ++ ++ shared_handle->attach = dma_buf_attach(shared_handle->dma_buf, dev); ++ if (IS_ERR(shared_handle->attach)) { ++ ret = PTR_ERR(shared_handle->attach); ++ shared_handle->attach = NULL; ++ dev_dbg(dev, "Unable to attach to dma_buf (%d)\n", ret); ++ goto cleanup_handle; ++ } ++ ++ shared_handle->sgt = dma_buf_map_attachment(shared_handle->attach, ++ DMA_BIDIRECTIONAL); ++ if (IS_ERR(shared_handle->sgt)) { ++ ret = PTR_ERR(shared_handle->sgt); ++ shared_handle->sgt = NULL; ++ dev_dbg(dev, "Failed to match attachment (%d)\n", ret); ++ goto cleanup_handle; ++ } ++ ++ ret = trusty_transfer_memory(tipc_shared_handle_dev(shared_handle), ++ &mem_id, shared_handle->sgt->sgl, ++ 
shared_handle->sgt->orig_nents, prot, tag, ++ lend); ++ ++ if (ret < 0) { ++ dev_dbg(dev, "Transferring memory failed: %d\n", ret); ++ /* ++ * The handle now has a sgt containing the pages, so we no ++ * longer need to clean up the pages directly. ++ */ ++ goto cleanup_handle; ++ } ++ shared_handle->shared = true; ++ ++mem_id_allocated: ++ shared_handle->tipc.obj_id = mem_id; ++ shared_handle->tipc.size = shared_handle->dma_buf->size; ++ shared_handle->tipc.tag = tag; ++ *out = shared_handle; ++ return 0; ++ ++cleanup_handle: ++ tipc_shared_handle_drop(shared_handle); ++ return ret; ++} ++ ++static ssize_t txbuf_write_iter(struct tipc_msg_buf *txbuf, ++ struct iov_iter *iter) ++{ ++ size_t len; ++ /* message length */ ++ len = iov_iter_count(iter); ++ ++ /* check available space */ ++ if (len > mb_avail_space(txbuf)) ++ return -EMSGSIZE; ++ ++ /* copy in message data */ ++ if (copy_from_iter(mb_put_data(txbuf, len), len, iter) != len) ++ return -EFAULT; ++ ++ return len; ++} ++ ++static ssize_t txbuf_write_handles(struct tipc_msg_buf *txbuf, ++ struct tipc_shared_handle **shm_handles, ++ size_t shm_cnt) ++{ ++ size_t idx; ++ ++ /* message length */ ++ size_t len = shm_cnt * sizeof(struct tipc_shm); ++ ++ /* check available space */ ++ if (len > mb_avail_space(txbuf)) ++ return -EMSGSIZE; ++ ++ /* copy over handles */ ++ for (idx = 0; idx < shm_cnt; idx++) { ++ memcpy(mb_put_data(txbuf, sizeof(struct tipc_shm)), ++ &shm_handles[idx]->tipc, ++ sizeof(struct tipc_shm)); ++ } ++ ++ txbuf->shm_cnt += shm_cnt; ++ ++ return len; ++} ++ ++static long filp_send_ioctl(struct file *filp, ++ const struct tipc_send_msg_req __user *arg) ++{ ++ struct tipc_send_msg_req req; ++ struct iovec fast_iovs[UIO_FASTIOV]; ++ struct iovec *iov = fast_iovs; ++ struct iov_iter iter; ++ struct trusty_shm *shm = NULL; ++ struct tipc_shared_handle **shm_handles = NULL; ++ int shm_idx = 0; ++ int release_idx; ++ struct tipc_dn_chan *dn = filp->private_data; ++ struct tipc_virtio_dev *vds = 
dn->chan->vds; ++ struct device *dev = &vds->vdev->dev; ++ long timeout = TXBUF_TIMEOUT; ++ struct tipc_msg_buf *txbuf = NULL; ++ long ret = 0; ++ ssize_t data_len = 0; ++ ssize_t shm_len = 0; ++ ++ if (copy_from_user(&req, arg, sizeof(req))) ++ return -EFAULT; ++ ++ if (req.shm_cnt > U16_MAX) ++ return -E2BIG; ++ ++ shm = kmalloc_array(req.shm_cnt, sizeof(*shm), GFP_KERNEL); ++ if (!shm) ++ return -ENOMEM; ++ ++ shm_handles = kmalloc_array(req.shm_cnt, sizeof(*shm_handles), ++ GFP_KERNEL); ++ if (!shm_handles) { ++ ret = -ENOMEM; ++ goto shm_handles_alloc_failed; ++ } ++ ++ if (copy_from_user(shm, u64_to_user_ptr(req.shm), ++ req.shm_cnt * sizeof(struct trusty_shm))) { ++ ret = -EFAULT; ++ goto load_shm_args_failed; ++ } ++ ++ ret = import_iovec(READ, u64_to_user_ptr(req.iov), req.iov_cnt, ++ ARRAY_SIZE(fast_iovs), &iov, &iter); ++ if (ret < 0) { ++ dev_dbg(dev, "Failed to import iovec\n"); ++ goto iov_import_failed; ++ } ++ ++ for (shm_idx = 0; shm_idx < req.shm_cnt; shm_idx++) { ++ switch (shm[shm_idx].transfer) { ++ case TRUSTY_SHARE: ++ case TRUSTY_LEND: ++ case TRUSTY_SEND_SECURE: ++ break; ++ default: ++ dev_err(dev, "Unknown transfer type: 0x%x\n", ++ shm[shm_idx].transfer); ++ goto shm_share_failed; ++ } ++ ret = dn_share_fd(dn, shm[shm_idx].fd, shm[shm_idx].transfer, ++ &shm_handles[shm_idx]); ++ if (ret) { ++ dev_dbg(dev, "Forwarding memory failed\n" ++ ); ++ goto shm_share_failed; ++ } ++ } ++ ++ if (filp->f_flags & O_NONBLOCK) ++ timeout = 0; ++ ++ txbuf = tipc_chan_get_txbuf_timeout(dn->chan, timeout); ++ if (IS_ERR(txbuf)) { ++ dev_dbg(dev, "Failed to get txbuffer\n"); ++ ret = PTR_ERR(txbuf); ++ goto get_txbuf_failed; ++ } ++ ++ data_len = txbuf_write_iter(txbuf, &iter); ++ if (data_len < 0) { ++ ret = data_len; ++ goto txbuf_write_failed; ++ } ++ ++ shm_len = txbuf_write_handles(txbuf, shm_handles, req.shm_cnt); ++ if (shm_len < 0) { ++ ret = shm_len; ++ goto txbuf_write_failed; ++ } ++ ++ /* ++ * These need to be aded to the index before queueing 
the message. ++ * As soon as the message is sent, we may receive a message back from ++ * Trusty saying it's no longer in use, and the shared_handle needs ++ * to be there when that happens. ++ */ ++ for (shm_idx = 0; shm_idx < req.shm_cnt; shm_idx++) ++ tipc_shared_handle_register(shm_handles[shm_idx]); ++ ++ ret = tipc_chan_queue_msg(dn->chan, txbuf); ++ ++ if (ret) ++ goto queue_failed; ++ ++ ret = data_len; ++ ++common_cleanup: ++ kfree(iov); ++iov_import_failed: ++load_shm_args_failed: ++ kfree(shm_handles); ++shm_handles_alloc_failed: ++ kfree(shm); ++ return ret; ++ ++queue_failed: ++ for (release_idx = 0; release_idx < req.shm_cnt; release_idx++) ++ tipc_shared_handle_take(vds, ++ shm_handles[release_idx]->tipc.obj_id); ++txbuf_write_failed: ++ tipc_chan_put_txbuf(dn->chan, txbuf); ++get_txbuf_failed: ++shm_share_failed: ++ for (shm_idx--; shm_idx >= 0; shm_idx--) ++ tipc_shared_handle_drop(shm_handles[shm_idx]); ++ goto common_cleanup; ++} ++ ++static long tipc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ++{ ++ struct tipc_dn_chan *dn = filp->private_data; ++ ++ switch (cmd) { ++ case TIPC_IOC_CONNECT: ++ return dn_connect_ioctl(dn, (char __user *)arg); ++ case TIPC_IOC_SEND_MSG: ++ return filp_send_ioctl(filp, ++ (const struct tipc_send_msg_req __user *) ++ arg); ++ default: ++ dev_dbg(&dn->chan->vds->vdev->dev, ++ "Unhandled ioctl cmd: 0x%x\n", cmd); ++ return -ENOTTY; ++ } ++} ++ ++#ifdef CONFIG_COMPAT ++static long tipc_compat_ioctl(struct file *filp, ++ unsigned int cmd, unsigned long arg) ++{ ++ struct tipc_dn_chan *dn = filp->private_data; ++ ++ switch (cmd) { ++ case TIPC_IOC32_CONNECT: ++ cmd = TIPC_IOC_CONNECT; ++ break; ++ default: ++ dev_dbg(&dn->chan->vds->vdev->dev, ++ "Unhandled compat ioctl command: 0x%x\n", cmd); ++ return -ENOTTY; ++ } ++ return tipc_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); ++} ++#endif ++ ++static inline bool _got_rx(struct tipc_dn_chan *dn) ++{ ++ if (dn->state != TIPC_CONNECTED) ++ return 
true; ++ ++ if (!list_empty(&dn->rx_msg_queue)) ++ return true; ++ ++ return false; ++} ++ ++static ssize_t tipc_read_iter(struct kiocb *iocb, struct iov_iter *iter) ++{ ++ ssize_t ret; ++ size_t len; ++ struct tipc_msg_buf *mb; ++ struct file *filp = iocb->ki_filp; ++ struct tipc_dn_chan *dn = filp->private_data; ++ ++ mutex_lock(&dn->lock); ++ ++ while (list_empty(&dn->rx_msg_queue)) { ++ if (dn->state != TIPC_CONNECTED) { ++ if (dn->state == TIPC_CONNECTING) ++ ret = -ENOTCONN; ++ else if (dn->state == TIPC_DISCONNECTED) ++ ret = -ENOTCONN; ++ else if (dn->state == TIPC_STALE) ++ ret = -ESHUTDOWN; ++ else ++ ret = -EBADFD; ++ goto out; ++ } ++ ++ mutex_unlock(&dn->lock); ++ ++ if (filp->f_flags & O_NONBLOCK) ++ return -EAGAIN; ++ ++ if (wait_event_interruptible(dn->readq, _got_rx(dn))) ++ return -ERESTARTSYS; ++ ++ mutex_lock(&dn->lock); ++ } ++ ++ mb = list_first_entry(&dn->rx_msg_queue, struct tipc_msg_buf, node); ++ ++ len = mb_avail_data(mb); ++ if (len > iov_iter_count(iter)) { ++ ret = -EMSGSIZE; ++ goto out; ++ } ++ ++ if (copy_to_iter(mb_get_data(mb, len), len, iter) != len) { ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ ret = len; ++ list_del(&mb->node); ++ tipc_chan_put_rxbuf(dn->chan, mb); ++ ++out: ++ mutex_unlock(&dn->lock); ++ return ret; ++} ++ ++static ssize_t tipc_write_iter(struct kiocb *iocb, struct iov_iter *iter) ++{ ++ struct file *filp = iocb->ki_filp; ++ struct tipc_dn_chan *dn = filp->private_data; ++ long timeout = TXBUF_TIMEOUT; ++ struct tipc_msg_buf *txbuf = NULL; ++ ssize_t ret = 0; ++ ssize_t len = 0; ++ ++ if (filp->f_flags & O_NONBLOCK) ++ timeout = 0; ++ ++ txbuf = tipc_chan_get_txbuf_timeout(dn->chan, timeout); ++ ++ if (IS_ERR(txbuf)) ++ return PTR_ERR(txbuf); ++ ++ len = txbuf_write_iter(txbuf, iter); ++ if (len < 0) ++ goto err_out; ++ ++ /* queue message */ ++ ret = tipc_chan_queue_msg(dn->chan, txbuf); ++ if (ret) ++ goto err_out; ++ ++ return len; ++ ++err_out: ++ tipc_chan_put_txbuf(dn->chan, txbuf); ++ return ret; ++} ++ 
++static __poll_t tipc_poll(struct file *filp, poll_table *wait) ++{ ++ __poll_t mask = 0; ++ struct tipc_dn_chan *dn = filp->private_data; ++ ++ mutex_lock(&dn->lock); ++ ++ poll_wait(filp, &dn->readq, wait); ++ ++ /* Writes always succeed for now */ ++ mask |= EPOLLOUT | EPOLLWRNORM; ++ ++ if (!list_empty(&dn->rx_msg_queue)) ++ mask |= EPOLLIN | EPOLLRDNORM; ++ ++ if (dn->state != TIPC_CONNECTED) ++ mask |= EPOLLERR; ++ ++ mutex_unlock(&dn->lock); ++ return mask; ++} ++ ++ ++static int tipc_release(struct inode *inode, struct file *filp) ++{ ++ struct tipc_dn_chan *dn = filp->private_data; ++ ++ dn_shutdown(dn); ++ ++ /* free all pending buffers */ ++ vds_free_msg_buf_list(dn->chan->vds, &dn->rx_msg_queue); ++ ++ /* shutdown channel */ ++ tipc_chan_shutdown(dn->chan); ++ ++ /* and destroy it */ ++ tipc_chan_destroy(dn->chan); ++ ++ return 0; ++} ++ ++static const struct file_operations tipc_fops = { ++ .open = tipc_open, ++ .release = tipc_release, ++ .unlocked_ioctl = tipc_ioctl, ++#ifdef CONFIG_COMPAT ++ .compat_ioctl = tipc_compat_ioctl, ++#endif ++ .read_iter = tipc_read_iter, ++ .write_iter = tipc_write_iter, ++ .poll = tipc_poll, ++ .owner = THIS_MODULE, ++}; ++ ++/*****************************************************************************/ ++ ++static void chan_trigger_event(struct tipc_chan *chan, int event) ++{ ++ if (!event) ++ return; ++ ++ chan->ops->handle_event(chan->ops_arg, event); ++} ++ ++static void _cleanup_vq(struct tipc_virtio_dev *vds, struct virtqueue *vq) ++{ ++ struct tipc_msg_buf *mb; ++ ++ while ((mb = virtqueue_detach_unused_buf(vq)) != NULL) ++ vds_free_msg_buf(vds, mb); ++} ++ ++static int _create_cdev_node(struct device *parent, ++ struct tipc_cdev_node *cdn, ++ const char *name) ++{ ++ int ret; ++ dev_t devt; ++ ++ if (!name) { ++ dev_dbg(parent, "%s: cdev name has to be provided\n", ++ __func__); ++ return -EINVAL; ++ } ++ ++ /* allocate minor */ ++ ret = idr_alloc(&tipc_devices, cdn, 0, MAX_DEVICES, GFP_KERNEL); ++ if (ret < 
0) { ++ dev_dbg(parent, "%s: failed (%d) to get id\n", ++ __func__, ret); ++ return ret; ++ } ++ ++ cdn->minor = ret; ++ cdev_init(&cdn->cdev, &tipc_fops); ++ cdn->cdev.owner = THIS_MODULE; ++ ++ /* Add character device */ ++ devt = MKDEV(tipc_major, cdn->minor); ++ ret = cdev_add(&cdn->cdev, devt, 1); ++ if (ret) { ++ dev_dbg(parent, "%s: cdev_add failed (%d)\n", ++ __func__, ret); ++ goto err_add_cdev; ++ } ++ ++ /* Create a device node */ ++ cdn->dev = device_create(tipc_class, parent, ++ devt, NULL, "trusty-ipc-%s", name); ++ if (IS_ERR(cdn->dev)) { ++ ret = PTR_ERR(cdn->dev); ++ dev_dbg(parent, "%s: device_create failed: %d\n", ++ __func__, ret); ++ goto err_device_create; ++ } ++ ++ return 0; ++ ++err_device_create: ++ cdn->dev = NULL; ++ cdev_del(&cdn->cdev); ++err_add_cdev: ++ idr_remove(&tipc_devices, cdn->minor); ++ return ret; ++} ++ ++static void create_cdev_node(struct tipc_virtio_dev *vds, ++ struct tipc_cdev_node *cdn) ++{ ++ int err; ++ ++ mutex_lock(&tipc_devices_lock); ++ ++ if (!default_vdev) { ++ kref_get(&vds->refcount); ++ default_vdev = vds->vdev; ++ } ++ ++ if (vds->cdev_name[0] && !cdn->dev) { ++ kref_get(&vds->refcount); ++ err = _create_cdev_node(&vds->vdev->dev, cdn, vds->cdev_name); ++ if (err) { ++ dev_err(&vds->vdev->dev, ++ "failed (%d) to create cdev node\n", err); ++ kref_put(&vds->refcount, _free_vds); ++ } ++ } ++ mutex_unlock(&tipc_devices_lock); ++} ++ ++static void destroy_cdev_node(struct tipc_virtio_dev *vds, ++ struct tipc_cdev_node *cdn) ++{ ++ mutex_lock(&tipc_devices_lock); ++ if (cdn->dev) { ++ device_destroy(tipc_class, MKDEV(tipc_major, cdn->minor)); ++ cdev_del(&cdn->cdev); ++ idr_remove(&tipc_devices, cdn->minor); ++ cdn->dev = NULL; ++ kref_put(&vds->refcount, _free_vds); ++ } ++ ++ if (default_vdev == vds->vdev) { ++ default_vdev = NULL; ++ kref_put(&vds->refcount, _free_vds); ++ } ++ ++ mutex_unlock(&tipc_devices_lock); ++} ++ ++static void _go_online(struct tipc_virtio_dev *vds) ++{ ++ mutex_lock(&vds->lock); ++ 
if (vds->state == VDS_OFFLINE) ++ vds->state = VDS_ONLINE; ++ mutex_unlock(&vds->lock); ++ ++ create_cdev_node(vds, &vds->cdev_node); ++ ++ dev_info(&vds->vdev->dev, "is online\n"); ++} ++ ++static void _go_offline(struct tipc_virtio_dev *vds) ++{ ++ struct tipc_chan *chan; ++ ++ /* change state to OFFLINE */ ++ mutex_lock(&vds->lock); ++ if (vds->state != VDS_ONLINE) { ++ mutex_unlock(&vds->lock); ++ return; ++ } ++ vds->state = VDS_OFFLINE; ++ mutex_unlock(&vds->lock); ++ ++ /* wakeup all waiters */ ++ wake_up_interruptible_all(&vds->sendq); ++ ++ /* shutdown all channels */ ++ while ((chan = vds_lookup_channel(vds, TIPC_ANY_ADDR))) { ++ mutex_lock(&chan->lock); ++ chan->state = TIPC_STALE; ++ chan->remote = 0; ++ chan_trigger_event(chan, TIPC_CHANNEL_SHUTDOWN); ++ mutex_unlock(&chan->lock); ++ kref_put(&chan->refcount, _free_chan); ++ } ++ ++ /* shutdown device node */ ++ destroy_cdev_node(vds, &vds->cdev_node); ++ ++ dev_info(&vds->vdev->dev, "is offline\n"); ++} ++ ++static void _handle_conn_rsp(struct tipc_virtio_dev *vds, ++ struct tipc_conn_rsp_body *rsp, size_t len) ++{ ++ struct tipc_chan *chan; ++ ++ if (sizeof(*rsp) != len) { ++ dev_err(&vds->vdev->dev, "%s: Invalid response length %zd\n", ++ __func__, len); ++ return; ++ } ++ ++ dev_dbg(&vds->vdev->dev, ++ "%s: connection response: for addr 0x%x: status %d remote addr 0x%x\n", ++ __func__, rsp->target, rsp->status, rsp->remote); ++ ++ /* Lookup channel */ ++ chan = vds_lookup_channel(vds, rsp->target); ++ if (chan) { ++ mutex_lock(&chan->lock); ++ if (chan->state == TIPC_CONNECTING) { ++ if (!rsp->status) { ++ chan->state = TIPC_CONNECTED; ++ chan->remote = rsp->remote; ++ chan->max_msg_cnt = rsp->max_msg_cnt; ++ chan->max_msg_size = rsp->max_msg_size; ++ chan_trigger_event(chan, ++ TIPC_CHANNEL_CONNECTED); ++ } else { ++ chan->state = TIPC_DISCONNECTED; ++ chan->remote = 0; ++ chan_trigger_event(chan, ++ TIPC_CHANNEL_DISCONNECTED); ++ } ++ } ++ mutex_unlock(&chan->lock); ++ kref_put(&chan->refcount, 
_free_chan); ++ } ++} ++ ++static void _handle_disc_req(struct tipc_virtio_dev *vds, ++ struct tipc_disc_req_body *req, size_t len) ++{ ++ struct tipc_chan *chan; ++ ++ if (sizeof(*req) != len) { ++ dev_err(&vds->vdev->dev, "%s: Invalid request length %zd\n", ++ __func__, len); ++ return; ++ } ++ ++ dev_dbg(&vds->vdev->dev, "%s: disconnect request: for addr 0x%x\n", ++ __func__, req->target); ++ ++ chan = vds_lookup_channel(vds, req->target); ++ if (chan) { ++ mutex_lock(&chan->lock); ++ if (chan->state == TIPC_CONNECTED || ++ chan->state == TIPC_CONNECTING) { ++ chan->state = TIPC_DISCONNECTED; ++ chan->remote = 0; ++ chan_trigger_event(chan, TIPC_CHANNEL_DISCONNECTED); ++ } ++ mutex_unlock(&chan->lock); ++ kref_put(&chan->refcount, _free_chan); ++ } ++} ++ ++static void _handle_release(struct tipc_virtio_dev *vds, ++ struct tipc_release_body *req, size_t len) ++{ ++ struct tipc_shared_handle *handle = NULL; ++ struct device *dev = &vds->vdev->dev; ++ int ret = 0; ++ ++ if (len < sizeof(*req)) { ++ dev_err(dev, "Received undersized release control message\n"); ++ return; ++ } ++ ++ handle = tipc_shared_handle_take(vds, req->id); ++ if (!handle) { ++ dev_err(dev, ++ "Received release control message for untracked handle: 0x%llx\n", ++ req->id); ++ return; ++ } ++ ++ ret = tipc_shared_handle_drop(handle); ++ ++ if (ret) { ++ dev_err(dev, ++ "Failed to release handle 0x%llx upon request: (%d)\n", ++ req->id, ret); ++ /* ++ * Put the handle back in case we got a spurious release now and ++ * get a real one later. This path should not happen, we're ++ * just trying to be robust. ++ */ ++ tipc_shared_handle_register(handle); ++ } ++} ++ ++static void _handle_ctrl_msg(struct tipc_virtio_dev *vds, ++ void *data, int len, u32 src) ++{ ++ struct tipc_ctrl_msg *msg = data; ++ ++ if ((len < sizeof(*msg)) || (sizeof(*msg) + msg->body_len != len)) { ++ dev_err(&vds->vdev->dev, ++ "%s: Invalid message length ( %d vs. 
%d)\n", ++ __func__, (int)(sizeof(*msg) + msg->body_len), len); ++ return; ++ } ++ ++ dev_dbg(&vds->vdev->dev, ++ "%s: Incoming ctrl message: src 0x%x type %d len %d\n", ++ __func__, src, msg->type, msg->body_len); ++ ++ switch (msg->type) { ++ case TIPC_CTRL_MSGTYPE_GO_ONLINE: ++ _go_online(vds); ++ break; ++ ++ case TIPC_CTRL_MSGTYPE_GO_OFFLINE: ++ _go_offline(vds); ++ break; ++ ++ case TIPC_CTRL_MSGTYPE_CONN_RSP: ++ _handle_conn_rsp(vds, (struct tipc_conn_rsp_body *)msg->body, ++ msg->body_len); ++ break; ++ ++ case TIPC_CTRL_MSGTYPE_DISC_REQ: ++ _handle_disc_req(vds, (struct tipc_disc_req_body *)msg->body, ++ msg->body_len); ++ break; ++ ++ case TIPC_CTRL_MSGTYPE_RELEASE: ++ _handle_release(vds, (struct tipc_release_body *)msg->body, ++ msg->body_len); ++ break; ++ ++ default: ++ dev_warn(&vds->vdev->dev, ++ "%s: Unexpected message type: %d\n", ++ __func__, msg->type); ++ } ++} ++ ++static void handle_dropped_chan_msg(struct tipc_virtio_dev *vds, ++ struct tipc_msg_buf *mb, ++ struct tipc_msg_hdr *msg) ++{ ++ int shm_idx; ++ struct tipc_shm *shm; ++ struct tipc_shared_handle *shared_handle; ++ struct device *dev = &vds->vdev->dev; ++ size_t len; ++ ++ if (msg->len < msg->shm_cnt * sizeof(*shm)) { ++ dev_err(dev, "shm_cnt does not fit in dropped message"); ++ /* The message is corrupt, so we can't recover resources */ ++ return; ++ } ++ ++ len = msg->len - msg->shm_cnt * sizeof(*shm); ++ /* skip normal data */ ++ (void)mb_get_data(mb, len); ++ ++ for (shm_idx = 0; shm_idx < msg->shm_cnt; shm_idx++) { ++ shm = mb_get_data(mb, sizeof(*shm)); ++ shared_handle = tipc_shared_handle_take(vds, shm->obj_id); ++ if (shared_handle) { ++ if (tipc_shared_handle_drop(shared_handle)) ++ dev_err(dev, ++ "Failed to drop handle found in dropped buffer"); ++ } else { ++ dev_err(dev, ++ "Found handle in dropped buffer which was not registered to tipc device..."); ++ } ++ } ++} ++ ++static void handle_dropped_mb(struct tipc_virtio_dev *vds, ++ struct tipc_msg_buf *mb) ++{ ++ struct 
tipc_msg_hdr *msg; ++ ++ mb_reset_read(mb); ++ msg = mb_get_data(mb, sizeof(*msg)); ++ if (msg->dst != TIPC_CTRL_ADDR) { ++ handle_dropped_chan_msg(vds, mb, msg); ++ } ++} ++ ++static int _handle_rxbuf(struct tipc_virtio_dev *vds, ++ struct tipc_msg_buf *rxbuf, size_t rxlen) ++{ ++ int err; ++ struct scatterlist sg; ++ struct tipc_msg_hdr *msg; ++ struct device *dev = &vds->vdev->dev; ++ ++ /* message sanity check */ ++ if (rxlen > rxbuf->buf_sz) { ++ dev_warn(dev, "inbound msg is too big: %zd\n", rxlen); ++ goto drop_it; ++ } ++ ++ if (rxlen < sizeof(*msg)) { ++ dev_warn(dev, "inbound msg is too short: %zd\n", rxlen); ++ goto drop_it; ++ } ++ ++ /* reset buffer and put data */ ++ mb_reset(rxbuf); ++ mb_put_data(rxbuf, rxlen); ++ ++ /* get message header */ ++ msg = mb_get_data(rxbuf, sizeof(*msg)); ++ if (mb_avail_data(rxbuf) != msg->len) { ++ dev_warn(dev, "inbound msg length mismatch: (%zu vs. %d)\n", ++ mb_avail_data(rxbuf), msg->len); ++ goto drop_it; ++ } ++ ++ dev_dbg(dev, "From: %d, To: %d, Len: %d, Flags: 0x%x, Reserved: %d, shm_cnt: %d\n", ++ msg->src, msg->dst, msg->len, msg->flags, msg->reserved, ++ msg->shm_cnt); ++ ++ /* message directed to control endpoint is a special case */ ++ if (msg->dst == TIPC_CTRL_ADDR) { ++ _handle_ctrl_msg(vds, msg->data, msg->len, msg->src); ++ } else { ++ struct tipc_chan *chan = NULL; ++ /* Lookup channel */ ++ chan = vds_lookup_channel(vds, msg->dst); ++ if (chan) { ++ /* handle it */ ++ rxbuf = chan->ops->handle_msg(chan->ops_arg, rxbuf); ++ kref_put(&chan->refcount, _free_chan); ++ if (WARN_ON(!rxbuf)) ++ return -EINVAL; ++ } ++ } ++ ++drop_it: ++ /* add the buffer back to the virtqueue */ ++ sg_init_one(&sg, rxbuf, rxbuf->buf_sz); ++ err = virtqueue_add_inbuf(vds->rxvq, &sg, 1, rxbuf, GFP_KERNEL); ++ if (err < 0) { ++ dev_err(dev, "failed to add a virtqueue buffer: %d\n", err); ++ return err; ++ } ++ ++ return 0; ++} ++ ++static void _rxvq_cb(struct virtqueue *rxvq) ++{ ++ unsigned int len; ++ struct tipc_msg_buf 
*mb; ++ unsigned int msg_cnt = 0; ++ struct tipc_virtio_dev *vds = rxvq->vdev->priv; ++ ++ while ((mb = virtqueue_get_buf(rxvq, &len)) != NULL) { ++ if (_handle_rxbuf(vds, mb, len)) ++ break; ++ msg_cnt++; ++ } ++ ++ /* tell the other size that we added rx buffers */ ++ if (msg_cnt) ++ virtqueue_kick(rxvq); ++} ++ ++static void _txvq_cb(struct virtqueue *txvq) ++{ ++ unsigned int len; ++ struct tipc_msg_buf *mb; ++ bool need_wakeup = false; ++ struct tipc_virtio_dev *vds = txvq->vdev->priv; ++ ++ /* detach all buffers */ ++ mutex_lock(&vds->lock); ++ while ((mb = virtqueue_get_buf(txvq, &len)) != NULL) { ++ if ((int)len < 0) ++ handle_dropped_mb(vds, mb); ++ need_wakeup |= _put_txbuf_locked(vds, mb); ++ } ++ mutex_unlock(&vds->lock); ++ ++ if (need_wakeup) { ++ /* wake up potential senders waiting for a tx buffer */ ++ wake_up_interruptible_all(&vds->sendq); ++ } ++} ++ ++static int tipc_virtio_probe(struct virtio_device *vdev) ++{ ++ int err, i; ++ struct tipc_virtio_dev *vds; ++ struct tipc_dev_config config; ++ struct virtqueue *vqs[2]; ++ vq_callback_t *vq_cbs[] = {_rxvq_cb, _txvq_cb}; ++ static const char * const vq_names[] = { "rx", "tx" }; ++ ++ vds = kzalloc(sizeof(*vds), GFP_KERNEL); ++ if (!vds) ++ return -ENOMEM; ++ ++ vds->vdev = vdev; ++ ++ mutex_init(&vds->lock); ++ mutex_init(&vds->shared_handles_lock); ++ kref_init(&vds->refcount); ++ init_waitqueue_head(&vds->sendq); ++ INIT_LIST_HEAD(&vds->free_buf_list); ++ idr_init(&vds->addr_idr); ++ vds->shared_handles = RB_ROOT; ++ dma_coerce_mask_and_coherent(&vds->vdev->dev, ++ *vds->vdev->dev.parent->parent->dma_mask); ++ ++ /* set default max message size and alignment */ ++ memset(&config, 0, sizeof(config)); ++ config.msg_buf_max_size = DEFAULT_MSG_BUF_SIZE; ++ config.msg_buf_alignment = DEFAULT_MSG_BUF_ALIGN; ++ ++ /* get configuration if present */ ++ vdev->config->get(vdev, 0, &config, sizeof(config)); ++ ++ /* copy dev name */ ++ strncpy(vds->cdev_name, config.dev_name, sizeof(vds->cdev_name)); ++ 
vds->cdev_name[sizeof(vds->cdev_name)-1] = '\0'; ++ ++ /* find tx virtqueues (rx and tx and in this order) */ ++ err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, vq_names, NULL, ++ NULL); ++ if (err) ++ goto err_find_vqs; ++ ++ vds->rxvq = vqs[0]; ++ vds->txvq = vqs[1]; ++ ++ /* save max buffer size and count */ ++ vds->msg_buf_max_sz = config.msg_buf_max_size; ++ vds->msg_buf_max_cnt = virtqueue_get_vring_size(vds->txvq); ++ ++ /* set up the receive buffers */ ++ for (i = 0; i < virtqueue_get_vring_size(vds->rxvq); i++) { ++ struct scatterlist sg; ++ struct tipc_msg_buf *rxbuf; ++ ++ rxbuf = vds_alloc_msg_buf(vds, true); ++ if (!rxbuf) { ++ dev_err(&vdev->dev, "failed to allocate rx buffer\n"); ++ err = -ENOMEM; ++ goto err_free_rx_buffers; ++ } ++ ++ sg_init_one(&sg, rxbuf, rxbuf->buf_sz); ++ err = virtqueue_add_inbuf(vds->rxvq, &sg, 1, rxbuf, GFP_KERNEL); ++ WARN_ON(err); /* sanity check; this can't really happen */ ++ } ++ ++ vdev->priv = vds; ++ vds->state = VDS_OFFLINE; ++ ++ dev_dbg(&vdev->dev, "%s: done\n", __func__); ++ return 0; ++ ++err_free_rx_buffers: ++ _cleanup_vq(vds, vds->rxvq); ++err_find_vqs: ++ kref_put(&vds->refcount, _free_vds); ++ return err; ++} ++ ++static void tipc_virtio_remove(struct virtio_device *vdev) ++{ ++ struct tipc_virtio_dev *vds = vdev->priv; ++ ++ _go_offline(vds); ++ ++ mutex_lock(&vds->lock); ++ vds->state = VDS_DEAD; ++ vds->vdev = NULL; ++ mutex_unlock(&vds->lock); ++ ++ vdev->config->reset(vdev); ++ ++ idr_destroy(&vds->addr_idr); ++ ++ _cleanup_vq(vds, vds->rxvq); ++ _cleanup_vq(vds, vds->txvq); ++ vds_free_msg_buf_list(vds, &vds->free_buf_list); ++ ++ vdev->config->del_vqs(vds->vdev); ++ ++ kref_put(&vds->refcount, _free_vds); ++} ++ ++static const struct virtio_device_id tipc_virtio_id_table[] = { ++ { VIRTIO_ID_TRUSTY_IPC, VIRTIO_DEV_ANY_ID }, ++ { 0 }, ++}; ++ ++static const unsigned int features[] = { ++ 0, ++}; ++ ++static struct virtio_driver virtio_tipc_driver = { ++ .feature_table = features, ++ 
.feature_table_size = ARRAY_SIZE(features), ++ .driver.name = KBUILD_MODNAME, ++ .driver.owner = THIS_MODULE, ++ .id_table = tipc_virtio_id_table, ++ .probe = tipc_virtio_probe, ++ .remove = tipc_virtio_remove, ++}; ++ ++static int __init tipc_init(void) ++{ ++ int ret; ++ dev_t dev; ++ ++ ret = alloc_chrdev_region(&dev, 0, MAX_DEVICES, KBUILD_MODNAME); ++ if (ret) { ++ pr_err("%s: alloc_chrdev_region failed: %d\n", __func__, ret); ++ return ret; ++ } ++ ++ tipc_major = MAJOR(dev); ++ tipc_class = class_create(THIS_MODULE, KBUILD_MODNAME); ++ if (IS_ERR(tipc_class)) { ++ ret = PTR_ERR(tipc_class); ++ pr_err("%s: class_create failed: %d\n", __func__, ret); ++ goto err_class_create; ++ } ++ ++ ret = register_virtio_driver(&virtio_tipc_driver); ++ if (ret) { ++ pr_err("failed to register virtio driver: %d\n", ret); ++ goto err_register_virtio_drv; ++ } ++ ++ return 0; ++ ++err_register_virtio_drv: ++ class_destroy(tipc_class); ++ ++err_class_create: ++ unregister_chrdev_region(dev, MAX_DEVICES); ++ return ret; ++} ++ ++static void __exit tipc_exit(void) ++{ ++ unregister_virtio_driver(&virtio_tipc_driver); ++ class_destroy(tipc_class); ++ unregister_chrdev_region(MKDEV(tipc_major, 0), MAX_DEVICES); ++} ++ ++/* We need to init this early */ ++subsys_initcall(tipc_init); ++module_exit(tipc_exit); ++ ++MODULE_DEVICE_TABLE(tipc, tipc_virtio_id_table); ++MODULE_DESCRIPTION("Trusty IPC driver"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c +new file mode 100644 +index 000000000000..5c6076108d0e +--- /dev/null ++++ b/drivers/trusty/trusty-irq.c +@@ -0,0 +1,645 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2013 Google, Inc. 
++ */ ++ ++#include <linux/cpu.h> ++#include <linux/interrupt.h> ++#include <linux/irq.h> ++#include <linux/irqdomain.h> ++#include <linux/module.h> ++#include <linux/of.h> ++#include <linux/of_irq.h> ++#include <linux/platform_device.h> ++#include <linux/slab.h> ++#include <linux/string.h> ++#include <linux/trusty/smcall.h> ++#include <linux/trusty/sm_err.h> ++#include <linux/trusty/trusty.h> ++ ++struct trusty_irq { ++ struct trusty_irq_state *is; ++ struct hlist_node node; ++ unsigned int irq; ++ bool percpu; ++ bool enable; ++ bool doorbell; ++ struct trusty_irq __percpu *percpu_ptr; ++}; ++ ++struct trusty_irq_irqset { ++ struct hlist_head pending; ++ struct hlist_head inactive; ++}; ++ ++struct trusty_irq_state { ++ struct device *dev; ++ struct device *trusty_dev; ++ struct trusty_irq_irqset normal_irqs; ++ spinlock_t normal_irqs_lock; ++ struct trusty_irq_irqset __percpu *percpu_irqs; ++ struct notifier_block trusty_call_notifier; ++ struct hlist_node cpuhp_node; ++}; ++ ++static int trusty_irq_cpuhp_slot = -1; ++ ++static void trusty_irq_enable_pending_irqs(struct trusty_irq_state *is, ++ struct trusty_irq_irqset *irqset, ++ bool percpu) ++{ ++ struct hlist_node *n; ++ struct trusty_irq *trusty_irq; ++ ++ hlist_for_each_entry_safe(trusty_irq, n, &irqset->pending, node) { ++ dev_dbg(is->dev, ++ "%s: enable pending irq %d, percpu %d, cpu %d\n", ++ __func__, trusty_irq->irq, percpu, smp_processor_id()); ++ if (percpu) ++ enable_percpu_irq(trusty_irq->irq, 0); ++ else ++ enable_irq(trusty_irq->irq); ++ hlist_del(&trusty_irq->node); ++ hlist_add_head(&trusty_irq->node, &irqset->inactive); ++ } ++} ++ ++static void trusty_irq_enable_irqset(struct trusty_irq_state *is, ++ struct trusty_irq_irqset *irqset) ++{ ++ struct trusty_irq *trusty_irq; ++ ++ hlist_for_each_entry(trusty_irq, &irqset->inactive, node) { ++ if (trusty_irq->enable) { ++ dev_warn(is->dev, ++ "%s: percpu irq %d already enabled, cpu %d\n", ++ __func__, trusty_irq->irq, smp_processor_id()); ++ 
continue; ++ } ++ dev_dbg(is->dev, "%s: enable percpu irq %d, cpu %d\n", ++ __func__, trusty_irq->irq, smp_processor_id()); ++ enable_percpu_irq(trusty_irq->irq, 0); ++ trusty_irq->enable = true; ++ } ++} ++ ++static void trusty_irq_disable_irqset(struct trusty_irq_state *is, ++ struct trusty_irq_irqset *irqset) ++{ ++ struct hlist_node *n; ++ struct trusty_irq *trusty_irq; ++ ++ hlist_for_each_entry(trusty_irq, &irqset->inactive, node) { ++ if (!trusty_irq->enable) { ++ dev_warn(is->dev, ++ "irq %d already disabled, percpu %d, cpu %d\n", ++ trusty_irq->irq, trusty_irq->percpu, ++ smp_processor_id()); ++ continue; ++ } ++ dev_dbg(is->dev, "%s: disable irq %d, percpu %d, cpu %d\n", ++ __func__, trusty_irq->irq, trusty_irq->percpu, ++ smp_processor_id()); ++ trusty_irq->enable = false; ++ if (trusty_irq->percpu) ++ disable_percpu_irq(trusty_irq->irq); ++ else ++ disable_irq_nosync(trusty_irq->irq); ++ } ++ hlist_for_each_entry_safe(trusty_irq, n, &irqset->pending, node) { ++ if (!trusty_irq->enable) { ++ dev_warn(is->dev, ++ "pending irq %d already disabled, percpu %d, cpu %d\n", ++ trusty_irq->irq, trusty_irq->percpu, ++ smp_processor_id()); ++ } ++ dev_dbg(is->dev, ++ "%s: disable pending irq %d, percpu %d, cpu %d\n", ++ __func__, trusty_irq->irq, trusty_irq->percpu, ++ smp_processor_id()); ++ trusty_irq->enable = false; ++ hlist_del(&trusty_irq->node); ++ hlist_add_head(&trusty_irq->node, &irqset->inactive); ++ } ++} ++ ++static int trusty_irq_call_notify(struct notifier_block *nb, ++ unsigned long action, void *data) ++{ ++ struct trusty_irq_state *is; ++ ++ if (WARN_ON(!irqs_disabled())) ++ return NOTIFY_DONE; ++ ++ if (action != TRUSTY_CALL_PREPARE) ++ return NOTIFY_DONE; ++ ++ is = container_of(nb, struct trusty_irq_state, trusty_call_notifier); ++ ++ spin_lock(&is->normal_irqs_lock); ++ trusty_irq_enable_pending_irqs(is, &is->normal_irqs, false); ++ spin_unlock(&is->normal_irqs_lock); ++ trusty_irq_enable_pending_irqs(is, this_cpu_ptr(is->percpu_irqs), true); 
++ ++ return NOTIFY_OK; ++} ++ ++static irqreturn_t trusty_irq_handler(int irq, void *data) ++{ ++ struct trusty_irq *trusty_irq = data; ++ struct trusty_irq_state *is = trusty_irq->is; ++ struct trusty_irq_irqset *irqset; ++ ++ dev_dbg(is->dev, "%s: irq %d, percpu %d, cpu %d, enable %d\n", ++ __func__, irq, trusty_irq->irq, smp_processor_id(), ++ trusty_irq->enable); ++ ++ if (!trusty_irq->doorbell) { ++ if (trusty_irq->percpu) { ++ disable_percpu_irq(irq); ++ irqset = this_cpu_ptr(is->percpu_irqs); ++ } else { ++ disable_irq_nosync(irq); ++ irqset = &is->normal_irqs; ++ } ++ ++ spin_lock(&is->normal_irqs_lock); ++ if (trusty_irq->enable) { ++ hlist_del(&trusty_irq->node); ++ hlist_add_head(&trusty_irq->node, &irqset->pending); ++ } ++ spin_unlock(&is->normal_irqs_lock); ++ } ++ ++ trusty_enqueue_nop(is->trusty_dev, NULL); ++ ++ dev_dbg(is->dev, "%s: irq %d done\n", __func__, irq); ++ ++ return IRQ_HANDLED; ++} ++ ++static int trusty_irq_cpu_up(unsigned int cpu, struct hlist_node *node) ++{ ++ unsigned long irq_flags; ++ struct trusty_irq_state *is; ++ ++ is = container_of(node, struct trusty_irq_state, cpuhp_node); ++ ++ dev_dbg(is->dev, "%s: cpu %d\n", __func__, cpu); ++ ++ local_irq_save(irq_flags); ++ trusty_irq_enable_irqset(is, this_cpu_ptr(is->percpu_irqs)); ++ local_irq_restore(irq_flags); ++ ++ /* ++ * Temporary workaround blindly enqueuing work to force trusty scheduler ++ * to run after a cpu suspend. ++ * Root causing the workqueue being inappropriately empty ++ * (e.g. loss of an IPI) may make this workaround unnecessary ++ * in the future. 
++ */ ++ trusty_enqueue_nop(is->trusty_dev, NULL); ++ ++ return 0; ++} ++ ++static int trusty_irq_cpu_down(unsigned int cpu, struct hlist_node *node) ++{ ++ unsigned long irq_flags; ++ struct trusty_irq_state *is; ++ ++ is = container_of(node, struct trusty_irq_state, cpuhp_node); ++ ++ dev_dbg(is->dev, "%s: cpu %d\n", __func__, cpu); ++ ++ local_irq_save(irq_flags); ++ trusty_irq_disable_irqset(is, this_cpu_ptr(is->percpu_irqs)); ++ local_irq_restore(irq_flags); ++ ++ return 0; ++} ++ ++static int trusty_irq_map_ipi(struct trusty_irq_state *is, int irq) ++{ ++ int ret; ++ u32 ipi_range[3]; ++ struct device_node *gic; ++ struct of_phandle_args oirq = {}; ++ u32 beg, end, ipi_base; ++ ++ ret = of_property_read_u32_array(is->dev->of_node, "ipi-range", ++ ipi_range, ARRAY_SIZE(ipi_range)); ++ if (ret != 0) ++ return -ENODATA; ++ beg = ipi_range[0]; ++ end = ipi_range[1]; ++ ipi_base = ipi_range[2]; ++ ++ if (irq < beg || irq > end) ++ return -ENODATA; ++ ++ gic = of_irq_find_parent(is->dev->of_node); ++ if (!gic) ++ return -ENXIO; ++ ++ oirq.np = gic; ++ oirq.args_count = 1; ++ oirq.args[0] = ipi_base + (irq - beg); ++ ++ ret = irq_create_of_mapping(&oirq); ++ ++ of_node_put(gic); ++ return (!ret) ? -EINVAL : ret; ++} ++ ++static int trusty_irq_create_irq_mapping(struct trusty_irq_state *is, int irq) ++{ ++ int ret; ++ int index; ++ u32 irq_pos; ++ u32 templ_idx; ++ u32 range_base; ++ u32 range_end; ++ struct of_phandle_args oirq; ++ ++ /* check if this is an IPI (inter-processor interrupt) */ ++ ret = trusty_irq_map_ipi(is, irq); ++ if (ret != -ENODATA) ++ return ret; ++ ++ /* check if "interrupt-ranges" property is present */ ++ if (!of_find_property(is->dev->of_node, "interrupt-ranges", NULL)) { ++ /* fallback to old behavior to be backward compatible with ++ * systems that do not need IRQ domains. 
++ */ ++ return irq; ++ } ++ ++ /* find irq range */ ++ for (index = 0;; index += 3) { ++ ret = of_property_read_u32_index(is->dev->of_node, ++ "interrupt-ranges", ++ index, &range_base); ++ if (ret) ++ return ret; ++ ++ ret = of_property_read_u32_index(is->dev->of_node, ++ "interrupt-ranges", ++ index + 1, &range_end); ++ if (ret) ++ return ret; ++ ++ if (irq >= range_base && irq <= range_end) ++ break; ++ } ++ ++ /* read the rest of range entry: template index and irq_pos */ ++ ret = of_property_read_u32_index(is->dev->of_node, ++ "interrupt-ranges", ++ index + 2, &templ_idx); ++ if (ret) ++ return ret; ++ ++ /* read irq template */ ++ ret = of_parse_phandle_with_args(is->dev->of_node, ++ "interrupt-templates", ++ "#interrupt-cells", ++ templ_idx, &oirq); ++ if (ret) ++ return ret; ++ ++ WARN_ON(!oirq.np); ++ WARN_ON(!oirq.args_count); ++ ++ /* ++ * An IRQ template is a non empty array of u32 values describing group ++ * of interrupts having common properties. The u32 entry with index ++ * zero contains the position of irq_id in interrupt specifier array ++ * followed by data representing interrupt specifier array with irq id ++ * field omitted, so to convert irq template to interrupt specifier ++ * array we have to move down one slot the first irq_pos entries and ++ * replace the resulting gap with real irq id. ++ */ ++ irq_pos = oirq.args[0]; ++ ++ if (irq_pos >= oirq.args_count) { ++ dev_err(is->dev, "irq pos is out of range: %d\n", irq_pos); ++ return -EINVAL; ++ } ++ ++ for (index = 1; index <= irq_pos; index++) ++ oirq.args[index - 1] = oirq.args[index]; ++ ++ oirq.args[irq_pos] = irq - range_base; ++ ++ ret = irq_create_of_mapping(&oirq); ++ ++ return (!ret) ? 
-EINVAL : ret; ++} ++ ++static int trusty_irq_init_normal_irq(struct trusty_irq_state *is, int tirq) ++{ ++ int ret; ++ int irq; ++ unsigned long irq_flags; ++ struct trusty_irq *trusty_irq; ++ ++ dev_dbg(is->dev, "%s: irq %d\n", __func__, tirq); ++ ++ irq = trusty_irq_create_irq_mapping(is, tirq); ++ if (irq < 0) { ++ dev_err(is->dev, ++ "trusty_irq_create_irq_mapping failed (%d)\n", irq); ++ return irq; ++ } ++ ++ trusty_irq = kzalloc(sizeof(*trusty_irq), GFP_KERNEL); ++ if (!trusty_irq) ++ return -ENOMEM; ++ ++ trusty_irq->is = is; ++ trusty_irq->irq = irq; ++ trusty_irq->enable = true; ++ ++ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags); ++ hlist_add_head(&trusty_irq->node, &is->normal_irqs.inactive); ++ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags); ++ ++ ret = request_irq(irq, trusty_irq_handler, IRQF_NO_THREAD, ++ "trusty", trusty_irq); ++ if (ret) { ++ dev_err(is->dev, "request_irq failed %d\n", ret); ++ goto err_request_irq; ++ } ++ return 0; ++ ++err_request_irq: ++ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags); ++ hlist_del(&trusty_irq->node); ++ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags); ++ kfree(trusty_irq); ++ return ret; ++} ++ ++static int trusty_irq_init_per_cpu_irq(struct trusty_irq_state *is, int tirq, ++ unsigned int type) ++{ ++ int ret; ++ int irq; ++ unsigned int cpu; ++ struct trusty_irq __percpu *trusty_irq_handler_data; ++ ++ dev_dbg(is->dev, "%s: irq %d\n", __func__, tirq); ++ ++ irq = trusty_irq_create_irq_mapping(is, tirq); ++ if (irq <= 0) { ++ dev_err(is->dev, ++ "trusty_irq_create_irq_mapping failed (%d)\n", irq); ++ return irq; ++ } ++ ++ trusty_irq_handler_data = alloc_percpu(struct trusty_irq); ++ if (!trusty_irq_handler_data) ++ return -ENOMEM; ++ ++ for_each_possible_cpu(cpu) { ++ struct trusty_irq *trusty_irq; ++ struct trusty_irq_irqset *irqset; ++ ++ trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu); ++ irqset = per_cpu_ptr(is->percpu_irqs, cpu); ++ ++ trusty_irq->is = is; ++ 
hlist_add_head(&trusty_irq->node, &irqset->inactive); ++ trusty_irq->irq = irq; ++ trusty_irq->percpu = true; ++ trusty_irq->doorbell = type == TRUSTY_IRQ_TYPE_DOORBELL; ++ trusty_irq->percpu_ptr = trusty_irq_handler_data; ++ } ++ ++ ret = request_percpu_irq(irq, trusty_irq_handler, "trusty", ++ trusty_irq_handler_data); ++ if (ret) { ++ dev_err(is->dev, "request_percpu_irq failed %d\n", ret); ++ goto err_request_percpu_irq; ++ } ++ ++ return 0; ++ ++err_request_percpu_irq: ++ for_each_possible_cpu(cpu) { ++ struct trusty_irq *trusty_irq; ++ ++ trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu); ++ hlist_del(&trusty_irq->node); ++ } ++ ++ free_percpu(trusty_irq_handler_data); ++ return ret; ++} ++ ++static int trusty_smc_get_next_irq(struct trusty_irq_state *is, ++ unsigned long min_irq, unsigned int type) ++{ ++ return trusty_fast_call32(is->trusty_dev, SMC_FC_GET_NEXT_IRQ, ++ min_irq, type, 0); ++} ++ ++static int trusty_irq_init_one(struct trusty_irq_state *is, ++ int irq, unsigned int type) ++{ ++ int ret; ++ ++ irq = trusty_smc_get_next_irq(is, irq, type); ++ if (irq < 0) ++ return irq; ++ ++ if (type != TRUSTY_IRQ_TYPE_NORMAL) ++ ret = trusty_irq_init_per_cpu_irq(is, irq, type); ++ else ++ ret = trusty_irq_init_normal_irq(is, irq); ++ ++ if (ret) { ++ dev_warn(is->dev, ++ "failed to initialize irq %d, irq will be ignored\n", ++ irq); ++ } ++ ++ return irq + 1; ++} ++ ++static void trusty_irq_free_irqs(struct trusty_irq_state *is) ++{ ++ struct trusty_irq *irq; ++ struct hlist_node *n; ++ unsigned int cpu; ++ ++ hlist_for_each_entry_safe(irq, n, &is->normal_irqs.inactive, node) { ++ dev_dbg(is->dev, "%s: irq %d\n", __func__, irq->irq); ++ free_irq(irq->irq, irq); ++ hlist_del(&irq->node); ++ kfree(irq); ++ } ++ hlist_for_each_entry_safe(irq, n, ++ &this_cpu_ptr(is->percpu_irqs)->inactive, ++ node) { ++ struct trusty_irq __percpu *trusty_irq_handler_data; ++ ++ dev_dbg(is->dev, "%s: percpu irq %d\n", __func__, irq->irq); ++ trusty_irq_handler_data = 
irq->percpu_ptr; ++ free_percpu_irq(irq->irq, trusty_irq_handler_data); ++ for_each_possible_cpu(cpu) { ++ struct trusty_irq *irq_tmp; ++ ++ irq_tmp = per_cpu_ptr(trusty_irq_handler_data, cpu); ++ hlist_del(&irq_tmp->node); ++ } ++ free_percpu(trusty_irq_handler_data); ++ } ++} ++ ++static int trusty_irq_probe(struct platform_device *pdev) ++{ ++ int ret; ++ int irq; ++ unsigned long irq_flags; ++ struct trusty_irq_state *is; ++ ++ is = kzalloc(sizeof(*is), GFP_KERNEL); ++ if (!is) { ++ ret = -ENOMEM; ++ goto err_alloc_is; ++ } ++ ++ is->dev = &pdev->dev; ++ is->trusty_dev = is->dev->parent; ++ spin_lock_init(&is->normal_irqs_lock); ++ is->percpu_irqs = alloc_percpu(struct trusty_irq_irqset); ++ if (!is->percpu_irqs) { ++ ret = -ENOMEM; ++ goto err_alloc_pending_percpu_irqs; ++ } ++ ++ platform_set_drvdata(pdev, is); ++ ++ is->trusty_call_notifier.notifier_call = trusty_irq_call_notify; ++ ret = trusty_call_notifier_register(is->trusty_dev, ++ &is->trusty_call_notifier); ++ if (ret) { ++ dev_err(&pdev->dev, ++ "failed to register trusty call notifier\n"); ++ goto err_trusty_call_notifier_register; ++ } ++ ++ for (irq = 0; irq >= 0;) ++ irq = trusty_irq_init_one(is, irq, TRUSTY_IRQ_TYPE_PER_CPU); ++ for (irq = 0; irq >= 0;) ++ irq = trusty_irq_init_one(is, irq, TRUSTY_IRQ_TYPE_NORMAL); ++ for (irq = 0; irq >= 0;) ++ irq = trusty_irq_init_one(is, irq, TRUSTY_IRQ_TYPE_DOORBELL); ++ ++ ret = cpuhp_state_add_instance(trusty_irq_cpuhp_slot, &is->cpuhp_node); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "cpuhp_state_add_instance failed %d\n", ++ ret); ++ goto err_add_cpuhp_instance; ++ } ++ ++ return 0; ++ ++err_add_cpuhp_instance: ++ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags); ++ trusty_irq_disable_irqset(is, &is->normal_irqs); ++ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags); ++ trusty_irq_free_irqs(is); ++ trusty_call_notifier_unregister(is->trusty_dev, ++ &is->trusty_call_notifier); ++err_trusty_call_notifier_register: ++ free_percpu(is->percpu_irqs); 
++err_alloc_pending_percpu_irqs: ++ kfree(is); ++err_alloc_is: ++ return ret; ++} ++ ++static int trusty_irq_remove(struct platform_device *pdev) ++{ ++ int ret; ++ unsigned long irq_flags; ++ struct trusty_irq_state *is = platform_get_drvdata(pdev); ++ ++ ret = cpuhp_state_remove_instance(trusty_irq_cpuhp_slot, ++ &is->cpuhp_node); ++ if (WARN_ON(ret)) ++ return ret; ++ ++ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags); ++ trusty_irq_disable_irqset(is, &is->normal_irqs); ++ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags); ++ ++ trusty_irq_free_irqs(is); ++ ++ trusty_call_notifier_unregister(is->trusty_dev, ++ &is->trusty_call_notifier); ++ free_percpu(is->percpu_irqs); ++ kfree(is); ++ ++ return 0; ++} ++ ++static const struct of_device_id trusty_test_of_match[] = { ++ { .compatible = "android,trusty-irq-v1", }, ++ {}, ++}; ++ ++MODULE_DEVICE_TABLE(trusty, trusty_test_of_match); ++ ++static struct platform_driver trusty_irq_driver = { ++ .probe = trusty_irq_probe, ++ .remove = trusty_irq_remove, ++ .driver = { ++ .name = "trusty-irq", ++ .of_match_table = trusty_test_of_match, ++ }, ++}; ++ ++static int __init trusty_irq_driver_init(void) ++{ ++ int ret; ++ ++ /* allocate dynamic cpuhp state slot */ ++ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, ++ "trusty-irq:cpu:online", ++ trusty_irq_cpu_up, ++ trusty_irq_cpu_down); ++ if (ret < 0) ++ return ret; ++ trusty_irq_cpuhp_slot = ret; ++ ++ /* Register platform driver */ ++ ret = platform_driver_register(&trusty_irq_driver); ++ if (ret < 0) ++ goto err_driver_register; ++ ++ return ret; ++ ++err_driver_register: ++ /* undo cpuhp slot allocation */ ++ cpuhp_remove_multi_state(trusty_irq_cpuhp_slot); ++ trusty_irq_cpuhp_slot = -1; ++ ++ return ret; ++} ++ ++static void __exit trusty_irq_driver_exit(void) ++{ ++ platform_driver_unregister(&trusty_irq_driver); ++ cpuhp_remove_multi_state(trusty_irq_cpuhp_slot); ++ trusty_irq_cpuhp_slot = -1; ++} ++ ++module_init(trusty_irq_driver_init); 
++module_exit(trusty_irq_driver_exit); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("Trusty IRQ driver"); +diff --git a/drivers/trusty/trusty-log.c b/drivers/trusty/trusty-log.c +new file mode 100644 +index 000000000000..7b279fe63766 +--- /dev/null ++++ b/drivers/trusty/trusty-log.c +@@ -0,0 +1,830 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2015 Google, Inc. ++ */ ++#include <linux/platform_device.h> ++#include <linux/trusty/smcall.h> ++#include <linux/trusty/trusty.h> ++#include <linux/notifier.h> ++#include <linux/scatterlist.h> ++#include <linux/slab.h> ++#include <linux/mm.h> ++#include <linux/mod_devicetable.h> ++#include <linux/module.h> ++#include <linux/moduleparam.h> ++#include <linux/log2.h> ++#include <linux/miscdevice.h> ++#include <linux/poll.h> ++#include <linux/seq_file.h> ++#include <asm/page.h> ++#include "trusty-log.h" ++ ++/* ++ * Rationale for the chosen default log buffer size: ++ * - the log buffer shall contain unthrottled Trusty crash dump. ++ * - the register list portion of a crash dump is about 1KB ++ * - the memory-around-registers portion of a crash dump can be up to 12 KB ++ * - an average size backtrace is about 1 KB ++ * - average length of non-crash trusty logs during boot is about 85 characters ++ * - a crash dump with 50 lines of context therefore requires up to 18 KB ++ * - buffer size needs to be power-of-two number of bytes ++ * - rounding up to power of two from 18 KB gives 32 KB ++ * The log size can be adjusted by setting the "trusty_log.log_size" parameter ++ * on the kernel command line. The specified value will be adjusted as needed. 
++ */ ++ ++#define TRUSTY_LOG_DEFAULT_SIZE (32768) ++#define TRUSTY_LOG_MIN_SIZE (PAGE_SIZE / 2) ++#define TRUSTY_LOG_MAX_SIZE (1 * 1024 * 1024 * 1024) ++#define TRUSTY_LINE_BUFFER_SIZE (256) ++ ++static size_t log_size_param = TRUSTY_LOG_DEFAULT_SIZE; ++ ++static int trusty_log_size_set(const char *val, const struct kernel_param *kp) ++{ ++ unsigned long long requested = memparse(val, NULL); ++ ++ if (requested < TRUSTY_LOG_MIN_SIZE) ++ requested = TRUSTY_LOG_MIN_SIZE; ++ if (requested > TRUSTY_LOG_MAX_SIZE) ++ requested = TRUSTY_LOG_MAX_SIZE; ++ requested = rounddown_pow_of_two(requested); ++ log_size_param = requested; ++ return 0; ++} ++ ++static int trusty_log_size_get(char *buffer, const struct kernel_param *kp) ++{ ++ sprintf(buffer, "%zu", log_size_param); ++ return strlen(buffer); ++} ++ ++module_param_call(log_size, trusty_log_size_set, trusty_log_size_get, NULL, ++ 0644); ++/* ++ * If we log too much and a UART or other slow source is connected, we can stall ++ * out another thread which is doing printk. ++ * ++ * Trusty crash logs are currently ~16 lines, so 100 should include context and ++ * the crash most of the time. ++ */ ++static struct ratelimit_state trusty_log_rate_limit = ++ RATELIMIT_STATE_INIT("trusty_log", 1 * HZ, 100); ++ ++/** ++ * struct trusty_log_sfile - trusty log misc device state ++ * ++ * @misc: misc device created for the trusty log virtual file ++ * @device_name: misc device name following the convention ++ * "trusty-<name><id>" ++ */ ++struct trusty_log_sfile { ++ struct miscdevice misc; ++ char device_name[64]; ++}; ++ ++/** ++ * struct trusty_log_sink_state - trusty log sink state ++ * ++ * @get: current read unwrapped index ++ * @trusty_panicked: trusty panic status at the start of the sink interation ++ * (only used for kernel log sink) ++ * @sfile: seq_file used for sinking to a virtual file (misc device); ++ * set to NULL for the kernel log sink. 
++ * @ignore_overflow: ignore_overflow used to coalesce overflow messages and ++ * avoid reporting an overflow when sinking the oldest ++ * line to the virtual file (only used for virtual file sink) ++ * ++ * A sink state structure is used for both the kernel log sink ++ * and the virtual device sink. ++ * An instance of the sink state structure is dynamically created ++ * for each read iteration of the trusty log virtual file (misc device). ++ * ++ */ ++struct trusty_log_sink_state { ++ u32 get; ++ bool trusty_panicked; ++ ++ /* virtual file sink specific attributes */ ++ struct seq_file *sfile; ++ bool ignore_overflow; ++}; ++ ++struct trusty_log_state { ++ struct device *dev; ++ struct device *trusty_dev; ++ struct trusty_log_sfile log_sfile; ++ ++ struct log_rb *log; ++ struct trusty_log_sink_state klog_sink; ++ ++ u32 log_num_pages; ++ struct scatterlist *sg; ++ trusty_shared_mem_id_t log_pages_shared_mem_id; ++ ++ struct notifier_block call_notifier; ++ struct notifier_block panic_notifier; ++ char line_buffer[TRUSTY_LINE_BUFFER_SIZE]; ++ wait_queue_head_t poll_waiters; ++ /* this lock protects access to wake_put */ ++ spinlock_t wake_up_lock; ++ u32 last_wake_put; ++}; ++ ++static inline u32 u32_add_overflow(u32 a, u32 b) ++{ ++ u32 d; ++ ++ if (check_add_overflow(a, b, &d)) { ++ /* ++ * silence the overflow, ++ * what matters in the log buffer context ++ * is the casted addition ++ */ ++ } ++ return d; ++} ++ ++static inline u32 u32_sub_overflow(u32 a, u32 b) ++{ ++ u32 d; ++ ++ if (check_sub_overflow(a, b, &d)) { ++ /* ++ * silence the overflow, ++ * what matters in the log buffer context ++ * is the casted substraction ++ */ ++ } ++ return d; ++} ++ ++static int log_read_line(struct trusty_log_state *s, u32 put, u32 get) ++{ ++ struct log_rb *log = s->log; ++ int i; ++ char c = '\0'; ++ size_t max_to_read = ++ min_t(size_t, ++ u32_sub_overflow(put, get), ++ sizeof(s->line_buffer) - 1); ++ size_t mask = log->sz - 1; ++ ++ for (i = 0; i < max_to_read && c 
!= '\n';) { ++ c = log->data[get & mask]; ++ s->line_buffer[i++] = c; ++ get = u32_add_overflow(get, 1); ++ } ++ s->line_buffer[i] = '\0'; ++ ++ return i; ++} ++ ++/** ++ * trusty_log_has_data() - returns true when more data is available to sink ++ * @s: Current log state. ++ * @sink: trusty_log_sink_state holding the get index on a given sink ++ * ++ * Return: true if data is available. ++ */ ++static bool trusty_log_has_data(struct trusty_log_state *s, ++ struct trusty_log_sink_state *sink) ++{ ++ struct log_rb *log = s->log; ++ ++ return (log->put != sink->get); ++} ++ ++/** ++ * trusty_log_start() - initialize the sink iteration either to kernel log ++ * or to secondary log_sfile ++ * @s: Current log state. ++ * @sink: trusty_log_sink_state holding the get index on a given sink ++ * @index: Unwrapped ring buffer index from where iteration shall start ++ * ++ * Return: 0 if successful, negative error code otherwise ++ */ ++static int trusty_log_start(struct trusty_log_state *s, ++ struct trusty_log_sink_state *sink, ++ u32 index) ++{ ++ struct log_rb *log; ++ ++ if (WARN_ON(!s)) ++ return -EINVAL; ++ ++ log = s->log; ++ if (WARN_ON(!is_power_of_2(log->sz))) ++ return -EINVAL; ++ ++ sink->get = index; ++ return 0; ++} ++ ++/** ++ * trusty_log_show() - sink log entry at current iteration ++ * @s: Current log state. ++ * @sink: trusty_log_sink_state holding the get index on a given sink ++ */ ++static void trusty_log_show(struct trusty_log_state *s, ++ struct trusty_log_sink_state *sink) ++{ ++ struct log_rb *log = s->log; ++ u32 alloc, put, get; ++ int read_chars; ++ ++ /* ++ * For this ring buffer, at any given point, alloc >= put >= get. ++ * The producer side of the buffer is not locked, so the put and alloc ++ * pointers must be read in a defined order (put before alloc) so ++ * that the above condition is maintained. A read barrier is needed ++ * to make sure the hardware and compiler keep the reads ordered. 
++ */ ++ get = sink->get; ++ put = log->put; ++ ++ /* Make sure that the read of put occurs before the read of log data */ ++ rmb(); ++ ++ /* Read a line from the log */ ++ read_chars = log_read_line(s, put, get); ++ ++ /* Force the loads from log_read_line to complete. */ ++ rmb(); ++ alloc = log->alloc; ++ ++ /* ++ * Discard the line that was just read if the data could ++ * have been corrupted by the producer. ++ */ ++ if (u32_sub_overflow(alloc, get) > log->sz) { ++ /* ++ * this condition is acceptable in the case of the sfile sink ++ * when attempting to read the oldest entry (at alloc-log->sz) ++ * which may be overrun by a new one when ring buffer write ++ * index wraps around. ++ * So the overrun is not reported in case the oldest line ++ * was being read. ++ */ ++ if (sink->sfile) { ++ if (!sink->ignore_overflow) ++ seq_puts(sink->sfile, "log overflow.\n"); ++ /* coalesce subsequent contiguous overflows. */ ++ sink->ignore_overflow = true; ++ } else { ++ dev_err(s->dev, "log overflow.\n"); ++ } ++ sink->get = u32_sub_overflow(alloc, log->sz); ++ return; ++ } ++ /* compute next line index */ ++ sink->get = u32_add_overflow(get, read_chars); ++ /* once a line is valid, ignore_overflow must be disabled */ ++ sink->ignore_overflow = false; ++ if (sink->sfile) { ++ seq_printf(sink->sfile, "%s", s->line_buffer); ++ } else { ++ if (sink->trusty_panicked || ++ __ratelimit(&trusty_log_rate_limit)) { ++ dev_info(s->dev, "%s", s->line_buffer); ++ } ++ } ++} ++ ++static void *trusty_log_seq_start(struct seq_file *sfile, loff_t *pos) ++{ ++ struct trusty_log_sfile *lb; ++ struct trusty_log_state *s; ++ struct log_rb *log; ++ struct trusty_log_sink_state *log_sfile_sink; ++ u32 index; ++ int rc; ++ ++ if (WARN_ON(!pos)) ++ return ERR_PTR(-EINVAL); ++ ++ lb = sfile->private; ++ if (WARN_ON(!lb)) ++ return ERR_PTR(-EINVAL); ++ ++ log_sfile_sink = kzalloc(sizeof(*log_sfile_sink), GFP_KERNEL); ++ if (!log_sfile_sink) ++ return ERR_PTR(-ENOMEM); ++ ++ s = container_of(lb, 
struct trusty_log_state, log_sfile); ++ log_sfile_sink->sfile = sfile; ++ log = s->log; ++ if (*pos == 0) { ++ /* start at the oldest line */ ++ index = 0; ++ if (log->alloc > log->sz) ++ index = u32_sub_overflow(log->alloc, log->sz); ++ } else { ++ /* ++ * '*pos>0': pos hold the 32bits unwrapped index from where ++ * to start iterating ++ */ ++ index = (u32)*pos; ++ } ++ pr_debug("%s start=%u\n", __func__, index); ++ ++ log_sfile_sink->ignore_overflow = true; ++ rc = trusty_log_start(s, log_sfile_sink, index); ++ if (rc < 0) ++ goto free_sink; ++ ++ if (!trusty_log_has_data(s, log_sfile_sink)) ++ goto free_sink; ++ ++ return log_sfile_sink; ++ ++free_sink: ++ pr_debug("%s kfree\n", __func__); ++ kfree(log_sfile_sink); ++ return rc < 0 ? ERR_PTR(rc) : NULL; ++} ++ ++static void *trusty_log_seq_next(struct seq_file *sfile, void *v, loff_t *pos) ++{ ++ struct trusty_log_sfile *lb; ++ struct trusty_log_state *s; ++ struct trusty_log_sink_state *log_sfile_sink = v; ++ int rc = 0; ++ ++ if (WARN_ON(!log_sfile_sink)) ++ return ERR_PTR(-EINVAL); ++ ++ lb = sfile->private; ++ if (WARN_ON(!lb)) { ++ rc = -EINVAL; ++ goto end_of_iter; ++ } ++ s = container_of(lb, struct trusty_log_state, log_sfile); ++ ++ if (WARN_ON(!pos)) { ++ rc = -EINVAL; ++ goto end_of_iter; ++ } ++ /* ++ * When starting a virtual file sink, the start function is invoked ++ * with a pos argument which value is set to zero. ++ * Subsequent starts are invoked with pos being set to ++ * the unwrapped read index (get). ++ * Upon u32 wraparound, the get index could be reset to zero. ++ * Thus a msb is used to distinguish the `get` zero value ++ * from the `start of file` zero value. ++ */ ++ *pos = (1UL << 32) + log_sfile_sink->get; ++ if (!trusty_log_has_data(s, log_sfile_sink)) ++ goto end_of_iter; ++ ++ return log_sfile_sink; ++ ++end_of_iter: ++ pr_debug("%s kfree\n", __func__); ++ kfree(log_sfile_sink); ++ return rc < 0 ? 
ERR_PTR(rc) : NULL; ++} ++ ++static void trusty_log_seq_stop(struct seq_file *sfile, void *v) ++{ ++ /* ++ * When iteration completes or on error, the next callback frees ++ * the sink structure and returns NULL/error-code. ++ * In that case stop (being invoked with void* v set to the last next ++ * return value) would be invoked with v == NULL or error code. ++ * When user space stops the iteration earlier than the end ++ * (in case of user-space memory allocation limit for example) ++ * then the stop function receives a non NULL get pointer ++ * and is in charge or freeing the sink structure. ++ */ ++ struct trusty_log_sink_state *log_sfile_sink = v; ++ ++ /* nothing to do - sink structure already freed */ ++ if (IS_ERR_OR_NULL(log_sfile_sink)) ++ return; ++ ++ kfree(log_sfile_sink); ++ ++ pr_debug("%s kfree\n", __func__); ++} ++ ++static int trusty_log_seq_show(struct seq_file *sfile, void *v) ++{ ++ struct trusty_log_sfile *lb; ++ struct trusty_log_state *s; ++ struct trusty_log_sink_state *log_sfile_sink = v; ++ ++ if (WARN_ON(!log_sfile_sink)) ++ return -EINVAL; ++ ++ lb = sfile->private; ++ if (WARN_ON(!lb)) ++ return -EINVAL; ++ ++ s = container_of(lb, struct trusty_log_state, log_sfile); ++ ++ trusty_log_show(s, log_sfile_sink); ++ return 0; ++} ++ ++static void trusty_dump_logs(struct trusty_log_state *s) ++{ ++ int rc; ++ /* ++ * note: klog_sink.get initialized to zero by kzalloc ++ */ ++ s->klog_sink.trusty_panicked = trusty_get_panic_status(s->trusty_dev); ++ ++ rc = trusty_log_start(s, &s->klog_sink, s->klog_sink.get); ++ if (rc < 0) ++ return; ++ ++ while (trusty_log_has_data(s, &s->klog_sink)) ++ trusty_log_show(s, &s->klog_sink); ++} ++ ++static int trusty_log_call_notify(struct notifier_block *nb, ++ unsigned long action, void *data) ++{ ++ struct trusty_log_state *s; ++ unsigned long flags; ++ u32 cur_put; ++ ++ if (action != TRUSTY_CALL_RETURNED) ++ return NOTIFY_DONE; ++ ++ s = container_of(nb, struct trusty_log_state, call_notifier); ++ 
spin_lock_irqsave(&s->wake_up_lock, flags); ++ cur_put = s->log->put; ++ if (cur_put != s->last_wake_put) { ++ s->last_wake_put = cur_put; ++ wake_up_all(&s->poll_waiters); ++ } ++ spin_unlock_irqrestore(&s->wake_up_lock, flags); ++ return NOTIFY_OK; ++} ++ ++static int trusty_log_panic_notify(struct notifier_block *nb, ++ unsigned long action, void *data) ++{ ++ struct trusty_log_state *s; ++ ++ /* ++ * Don't grab the spin lock to hold up the panic notifier, even ++ * though this is racy. ++ */ ++ s = container_of(nb, struct trusty_log_state, panic_notifier); ++ dev_info(s->dev, "panic notifier - trusty version %s", ++ trusty_version_str_get(s->trusty_dev)); ++ trusty_dump_logs(s); ++ return NOTIFY_OK; ++} ++ ++const struct seq_operations trusty_log_seq_ops = { ++ .start = trusty_log_seq_start, ++ .stop = trusty_log_seq_stop, ++ .next = trusty_log_seq_next, ++ .show = trusty_log_seq_show, ++}; ++ ++static int trusty_log_sfile_dev_open(struct inode *inode, struct file *file) ++{ ++ struct trusty_log_sfile *ls; ++ struct seq_file *sfile; ++ int rc; ++ ++ /* ++ * file->private_data contains a pointer to the misc_device struct ++ * passed to misc_register() ++ */ ++ if (WARN_ON(!file->private_data)) ++ return -EINVAL; ++ ++ ls = container_of(file->private_data, struct trusty_log_sfile, misc); ++ ++ /* ++ * seq_open uses file->private_data to store the seq_file associated ++ * with the struct file, but it must be NULL when seq_open is called ++ */ ++ file->private_data = NULL; ++ rc = seq_open(file, &trusty_log_seq_ops); ++ if (rc < 0) ++ return rc; ++ ++ sfile = file->private_data; ++ if (WARN_ON(!sfile)) ++ return -EINVAL; ++ ++ sfile->private = ls; ++ return 0; ++} ++ ++static unsigned int trusty_log_sfile_dev_poll(struct file *filp, ++ struct poll_table_struct *wait) ++{ ++ struct seq_file *sfile; ++ struct trusty_log_sfile *lb; ++ struct trusty_log_state *s; ++ struct log_rb *log; ++ ++ /* ++ * trusty_log_sfile_dev_open() pointed filp->private_data to a ++ * 
seq_file, and that seq_file->private to the trusty_log_sfile ++ * field of a trusty_log_state ++ */ ++ sfile = filp->private_data; ++ lb = sfile->private; ++ s = container_of(lb, struct trusty_log_state, log_sfile); ++ poll_wait(filp, &s->poll_waiters, wait); ++ log = s->log; ++ ++ /* ++ * Userspace has read up to filp->f_pos so far. Update klog_sink ++ * to indicate that, so that we don't end up dumping the entire ++ * Trusty log in case of panic. ++ */ ++ s->klog_sink.get = (u32)filp->f_pos; ++ ++ if (log->put != (u32)filp->f_pos) { ++ /* data ready to read */ ++ return EPOLLIN | EPOLLRDNORM; ++ } ++ /* no data available, go to sleep */ ++ return 0; ++} ++ ++static const struct file_operations log_sfile_dev_operations = { ++ .owner = THIS_MODULE, ++ .open = trusty_log_sfile_dev_open, ++ .poll = trusty_log_sfile_dev_poll, ++ .read = seq_read, ++ .release = seq_release, ++}; ++ ++static int trusty_log_sfile_register(struct trusty_log_state *s) ++{ ++ int ret; ++ struct trusty_log_sfile *ls = &s->log_sfile; ++ ++ if (WARN_ON(!ls)) ++ return -EINVAL; ++ ++ snprintf(ls->device_name, sizeof(ls->device_name), ++ "trusty-log%d", s->dev->id); ++ ls->misc.minor = MISC_DYNAMIC_MINOR; ++ ls->misc.name = ls->device_name; ++ ls->misc.fops = &log_sfile_dev_operations; ++ ++ ret = misc_register(&ls->misc); ++ if (ret) { ++ dev_err(s->dev, ++ "log_sfile error while doing misc_register ret=%d\n", ++ ret); ++ return ret; ++ } ++ dev_info(s->dev, "/dev/%s registered\n", ++ ls->device_name); ++ return 0; ++} ++ ++static void trusty_log_sfile_unregister(struct trusty_log_state *s) ++{ ++ struct trusty_log_sfile *ls = &s->log_sfile; ++ ++ misc_deregister(&ls->misc); ++ if (s->dev) { ++ dev_info(s->dev, "/dev/%s unregistered\n", ++ ls->misc.name); ++ } ++} ++ ++static bool trusty_supports_logging(struct device *device) ++{ ++ int result; ++ ++ result = trusty_std_call32(device, SMC_SC_SHARED_LOG_VERSION, ++ TRUSTY_LOG_API_VERSION, 0, 0); ++ if (result == SM_ERR_UNDEFINED_SMC) { ++ 
dev_info(device, "trusty-log not supported on secure side.\n"); ++ return false; ++ } else if (result < 0) { ++ dev_err(device, ++ "trusty std call (SMC_SC_SHARED_LOG_VERSION) failed: %d\n", ++ result); ++ return false; ++ } ++ ++ if (result != TRUSTY_LOG_API_VERSION) { ++ dev_info(device, "unsupported api version: %d, supported: %d\n", ++ result, TRUSTY_LOG_API_VERSION); ++ return false; ++ } ++ return true; ++} ++ ++static int trusty_log_init(struct platform_device *pdev) ++{ ++ struct trusty_log_state *s; ++ struct scatterlist *sg; ++ unsigned char *mem; ++ int i; ++ int result; ++ trusty_shared_mem_id_t mem_id; ++ int log_size; ++ ++ s = kzalloc(sizeof(*s), GFP_KERNEL); ++ if (!s) { ++ result = -ENOMEM; ++ goto error_alloc_state; ++ } ++ ++ s->dev = &pdev->dev; ++ s->trusty_dev = s->dev->parent; ++ ++ s->log_num_pages = DIV_ROUND_UP(log_size_param + sizeof(struct log_rb), ++ PAGE_SIZE); ++ s->sg = kcalloc(s->log_num_pages, sizeof(*s->sg), GFP_KERNEL); ++ if (!s->sg) { ++ result = -ENOMEM; ++ goto error_alloc_sg; ++ } ++ ++ log_size = s->log_num_pages * PAGE_SIZE; ++ mem = vzalloc(log_size); ++ if (!mem) { ++ result = -ENOMEM; ++ goto error_alloc_log; ++ } ++ ++ s->log = (struct log_rb *)mem; ++ ++ sg_init_table(s->sg, s->log_num_pages); ++ for_each_sg(s->sg, sg, s->log_num_pages, i) { ++ struct page *pg = vmalloc_to_page(mem + (i * PAGE_SIZE)); ++ ++ if (!pg) { ++ result = -ENOMEM; ++ goto err_share_memory; ++ } ++ sg_set_page(sg, pg, PAGE_SIZE, 0); ++ } ++ /* ++ * This will fail for Trusty api version < TRUSTY_API_VERSION_MEM_OBJ ++ * if s->log_num_pages > 1 ++ * Use trusty_share_memory_compat instead of trusty_share_memory in case ++ * s->log_num_pages == 1 and api version < TRUSTY_API_VERSION_MEM_OBJ, ++ * In that case SMC_SC_SHARED_LOG_ADD expects a different value than ++ * what trusty_share_memory returns ++ */ ++ result = trusty_share_memory_compat(s->trusty_dev, &mem_id, s->sg, ++ s->log_num_pages, PAGE_KERNEL); ++ if (result) { ++ dev_err(s->dev, 
"trusty_share_memory failed: %d\n", result); ++ goto err_share_memory; ++ } ++ s->log_pages_shared_mem_id = mem_id; ++ ++ result = trusty_std_call32(s->trusty_dev, ++ SMC_SC_SHARED_LOG_ADD, ++ (u32)(mem_id), (u32)(mem_id >> 32), ++ log_size); ++ if (result < 0) { ++ dev_err(s->dev, ++ "trusty std call (SMC_SC_SHARED_LOG_ADD) failed: %d 0x%llx\n", ++ result, mem_id); ++ goto error_std_call; ++ } ++ ++ init_waitqueue_head(&s->poll_waiters); ++ spin_lock_init(&s->wake_up_lock); ++ ++ s->call_notifier.notifier_call = trusty_log_call_notify; ++ result = trusty_call_notifier_register(s->trusty_dev, ++ &s->call_notifier); ++ if (result < 0) { ++ dev_err(&pdev->dev, ++ "failed to register trusty call notifier\n"); ++ goto error_call_notifier; ++ } ++ ++ s->panic_notifier.notifier_call = trusty_log_panic_notify; ++ result = atomic_notifier_chain_register(&panic_notifier_list, ++ &s->panic_notifier); ++ if (result < 0) { ++ dev_err(&pdev->dev, ++ "failed to register panic notifier\n"); ++ goto error_panic_notifier; ++ } ++ ++ result = trusty_log_sfile_register(s); ++ if (result < 0) { ++ dev_err(&pdev->dev, "failed to register log_sfile\n"); ++ goto error_log_sfile; ++ } ++ ++ platform_set_drvdata(pdev, s); ++ ++ return 0; ++ ++error_log_sfile: ++ atomic_notifier_chain_unregister(&panic_notifier_list, ++ &s->panic_notifier); ++error_panic_notifier: ++ trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier); ++error_call_notifier: ++ trusty_std_call32(s->trusty_dev, SMC_SC_SHARED_LOG_RM, ++ (u32)mem_id, (u32)(mem_id >> 32), 0); ++error_std_call: ++ if (WARN_ON(trusty_reclaim_memory(s->trusty_dev, mem_id, s->sg, ++ s->log_num_pages))) { ++ dev_err(&pdev->dev, "trusty_revoke_memory failed: %d 0x%llx\n", ++ result, mem_id); ++ /* ++ * It is not safe to free this memory if trusty_revoke_memory ++ * fails. Leak it in that case. 
++ */ ++ } else { ++err_share_memory: ++ vfree(s->log); ++ } ++error_alloc_log: ++ kfree(s->sg); ++error_alloc_sg: ++ kfree(s); ++error_alloc_state: ++ return result; ++} ++ ++static int trusty_log_probe(struct platform_device *pdev) ++{ ++ int rc; ++ ++ if (!trusty_supports_logging(pdev->dev.parent)) ++ return -ENXIO; ++ ++ rc = trusty_log_init(pdev); ++ if (rc && log_size_param > TRUSTY_LOG_MIN_SIZE) { ++ dev_warn(&pdev->dev, "init failed, retrying with 1-page log\n"); ++ log_size_param = TRUSTY_LOG_MIN_SIZE; ++ rc = trusty_log_init(pdev); ++ } ++ return rc; ++} ++ ++static int trusty_log_remove(struct platform_device *pdev) ++{ ++ int result; ++ struct trusty_log_state *s = platform_get_drvdata(pdev); ++ trusty_shared_mem_id_t mem_id = s->log_pages_shared_mem_id; ++ ++ trusty_log_sfile_unregister(s); ++ atomic_notifier_chain_unregister(&panic_notifier_list, ++ &s->panic_notifier); ++ trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier); ++ ++ result = trusty_std_call32(s->trusty_dev, SMC_SC_SHARED_LOG_RM, ++ (u32)mem_id, (u32)(mem_id >> 32), 0); ++ if (result) { ++ dev_err(&pdev->dev, ++ "trusty std call (SMC_SC_SHARED_LOG_RM) failed: %d\n", ++ result); ++ } ++ result = trusty_reclaim_memory(s->trusty_dev, mem_id, s->sg, ++ s->log_num_pages); ++ if (WARN_ON(result)) { ++ dev_err(&pdev->dev, ++ "trusty failed to remove shared memory: %d\n", result); ++ } else { ++ /* ++ * It is not safe to free this memory if trusty_revoke_memory ++ * fails. Leak it in that case. 
++ */ ++ vfree(s->log); ++ } ++ kfree(s->sg); ++ kfree(s); ++ ++ return 0; ++} ++ ++static const struct of_device_id trusty_test_of_match[] = { ++ { .compatible = "android,trusty-log-v1", }, ++ {}, ++}; ++ ++MODULE_DEVICE_TABLE(trusty, trusty_test_of_match); ++ ++static struct platform_driver trusty_log_driver = { ++ .probe = trusty_log_probe, ++ .remove = trusty_log_remove, ++ .driver = { ++ .name = "trusty-log", ++ .of_match_table = trusty_test_of_match, ++ }, ++}; ++ ++module_platform_driver(trusty_log_driver); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("Trusty logging driver"); +diff --git a/drivers/trusty/trusty-log.h b/drivers/trusty/trusty-log.h +new file mode 100644 +index 000000000000..7b5e6096b51e +--- /dev/null ++++ b/drivers/trusty/trusty-log.h +@@ -0,0 +1,28 @@ ++/* SPDX-License-Identifier: MIT */ ++/* ++ * Copyright (c) 2015 Google, Inc. ++ * ++ * Trusty also has a copy of this header. Please keep the copies in sync. ++ */ ++#ifndef _TRUSTY_LOG_H_ ++#define _TRUSTY_LOG_H_ ++ ++/* ++ * Ring buffer that supports one secure producer thread and one ++ * linux side consumer thread. ++ */ ++struct log_rb { ++ volatile uint32_t alloc; ++ volatile uint32_t put; ++ uint32_t sz; ++ volatile char data[]; ++} __packed; ++ ++#define SMC_SC_SHARED_LOG_VERSION SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 0) ++#define SMC_SC_SHARED_LOG_ADD SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 1) ++#define SMC_SC_SHARED_LOG_RM SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 2) ++ ++#define TRUSTY_LOG_API_VERSION 1 ++ ++#endif ++ +diff --git a/drivers/trusty/trusty-mem.c b/drivers/trusty/trusty-mem.c +new file mode 100644 +index 000000000000..8a360298e501 +--- /dev/null ++++ b/drivers/trusty/trusty-mem.c +@@ -0,0 +1,139 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2015 Google, Inc. 
++ */ ++ ++#include <linux/types.h> ++#include <linux/printk.h> ++#include <linux/trusty/arm_ffa.h> ++#include <linux/trusty/trusty.h> ++#include <linux/trusty/smcall.h> ++ ++#define MEM_ATTR_STRONGLY_ORDERED (0x00U) ++#define MEM_ATTR_DEVICE (0x04U) ++#define MEM_ATTR_NORMAL_NON_CACHEABLE (0x44U) ++#define MEM_ATTR_NORMAL_WRITE_THROUGH (0xAAU) ++#define MEM_ATTR_NORMAL_WRITE_BACK_READ_ALLOCATE (0xEEU) ++#define MEM_ATTR_NORMAL_WRITE_BACK_WRITE_ALLOCATE (0xFFU) ++ ++#define ATTR_RDONLY (1U << 7) ++#define ATTR_INNER_SHAREABLE (3U << 8) ++ ++static int get_mem_attr(struct page *page, pgprot_t pgprot) ++{ ++#if defined(CONFIG_ARM64) ++ u64 mair; ++ unsigned int attr_index = (pgprot_val(pgprot) & PTE_ATTRINDX_MASK) >> 2; ++ ++ asm ("mrs %0, mair_el1\n" : "=&r" (mair)); ++ return (mair >> (attr_index * 8)) & 0xff; ++ ++#elif defined(CONFIG_ARM_LPAE) ++ u32 mair; ++ unsigned int attr_index = ((pgprot_val(pgprot) & L_PTE_MT_MASK) >> 2); ++ ++ if (attr_index >= 4) { ++ attr_index -= 4; ++ asm volatile("mrc p15, 0, %0, c10, c2, 1\n" : "=&r" (mair)); ++ } else { ++ asm volatile("mrc p15, 0, %0, c10, c2, 0\n" : "=&r" (mair)); ++ } ++ return (mair >> (attr_index * 8)) & 0xff; ++ ++#elif defined(CONFIG_ARM) ++ /* check memory type */ ++ switch (pgprot_val(pgprot) & L_PTE_MT_MASK) { ++ case L_PTE_MT_WRITEALLOC: ++ return MEM_ATTR_NORMAL_WRITE_BACK_WRITE_ALLOCATE; ++ ++ case L_PTE_MT_BUFFERABLE: ++ return MEM_ATTR_NORMAL_NON_CACHEABLE; ++ ++ case L_PTE_MT_WRITEBACK: ++ return MEM_ATTR_NORMAL_WRITE_BACK_READ_ALLOCATE; ++ ++ case L_PTE_MT_WRITETHROUGH: ++ return MEM_ATTR_NORMAL_WRITE_THROUGH; ++ ++ case L_PTE_MT_UNCACHED: ++ return MEM_ATTR_STRONGLY_ORDERED; ++ ++ case L_PTE_MT_DEV_SHARED: ++ case L_PTE_MT_DEV_NONSHARED: ++ return MEM_ATTR_DEVICE; ++ ++ default: ++ return -EINVAL; ++ } ++#else ++ return 0; ++#endif ++} ++ ++int trusty_encode_page_info(struct ns_mem_page_info *inf, ++ struct page *page, pgprot_t pgprot) ++{ ++ int mem_attr; ++ u64 pte; ++ u8 ffa_mem_attr; ++ u8 
ffa_mem_perm = 0; ++ ++ if (!inf || !page) ++ return -EINVAL; ++ ++ /* get physical address */ ++ pte = (u64)page_to_phys(page); ++ ++ /* get memory attributes */ ++ mem_attr = get_mem_attr(page, pgprot); ++ if (mem_attr < 0) ++ return mem_attr; ++ ++ switch (mem_attr) { ++ case MEM_ATTR_STRONGLY_ORDERED: ++ ffa_mem_attr = FFA_MEM_ATTR_DEVICE_NGNRNE; ++ break; ++ ++ case MEM_ATTR_DEVICE: ++ ffa_mem_attr = FFA_MEM_ATTR_DEVICE_NGNRE; ++ break; ++ ++ case MEM_ATTR_NORMAL_NON_CACHEABLE: ++ ffa_mem_attr = FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED; ++ break; ++ ++ case MEM_ATTR_NORMAL_WRITE_BACK_READ_ALLOCATE: ++ case MEM_ATTR_NORMAL_WRITE_BACK_WRITE_ALLOCATE: ++ ffa_mem_attr = FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB; ++ break; ++ ++ default: ++ return -EINVAL; ++ } ++ ++ inf->paddr = pte; ++ ++ /* add other attributes */ ++#if defined(CONFIG_ARM64) || defined(CONFIG_ARM_LPAE) ++ pte |= pgprot_val(pgprot); ++#elif defined(CONFIG_ARM) ++ if (pgprot_val(pgprot) & L_PTE_RDONLY) ++ pte |= ATTR_RDONLY; ++ if (pgprot_val(pgprot) & L_PTE_SHARED) ++ pte |= ATTR_INNER_SHAREABLE; /* inner sharable */ ++#endif ++ ++ if (!(pte & ATTR_RDONLY)) ++ ffa_mem_perm |= FFA_MEM_PERM_RW; ++ else ++ ffa_mem_perm |= FFA_MEM_PERM_RO; ++ ++ if ((pte & ATTR_INNER_SHAREABLE) == ATTR_INNER_SHAREABLE) ++ ffa_mem_attr |= FFA_MEM_ATTR_INNER_SHAREABLE; ++ ++ inf->ffa_mem_attr = ffa_mem_attr; ++ inf->ffa_mem_perm = ffa_mem_perm; ++ inf->compat_attr = (pte & 0x0000FFFFFFFFFFFFull) | ++ ((u64)mem_attr << 48); ++ return 0; ++} +diff --git a/drivers/trusty/trusty-smc-arm.S b/drivers/trusty/trusty-smc-arm.S +new file mode 100644 +index 000000000000..8ff83547d33f +--- /dev/null ++++ b/drivers/trusty/trusty-smc-arm.S +@@ -0,0 +1,41 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright (C) 2020 Google, Inc. 
++ */ ++ ++#include <linux/linkage.h> ++ ++.arch_extension sec ++ ++ENTRY(trusty_smc8) ++ /* Save stack location where r3-r7 smc arguments are stored */ ++ mov r12, sp ++ ++ /* Save original r4-r7 values as caller expects these to be preserved */ ++ push {r4-r7} ++ ++ /* Save return value pointer and return address */ ++ push {r0, lr} ++ ++ /* arm abi shifts arguments when returning a struct, shift them back */ ++ mov r0, r1 ++ mov r1, r2 ++ mov r2, r3 ++ ++ /* Load stack based arguments */ ++ ldmia r12, {r3-r7} ++ ++ smc #0 ++ ++ /* Restore return address and get return value pointer */ ++ pop {r12, lr} ++ ++ /* Copy 8-register smc return value to struct smc_ret8 return value */ ++ stmia r12, {r0-r7} ++ ++ /* Restore original r4-r7 values */ ++ pop {r4-r7} ++ ++ /* Return */ ++ bx lr ++ENDPROC(trusty_smc8) +diff --git a/drivers/trusty/trusty-smc-arm64.S b/drivers/trusty/trusty-smc-arm64.S +new file mode 100644 +index 000000000000..14c8fed28a5e +--- /dev/null ++++ b/drivers/trusty/trusty-smc-arm64.S +@@ -0,0 +1,35 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright (C) 2020 Google, Inc. ++ */ ++ ++#include <linux/linkage.h> ++ ++.macro push ra, rb ++stp \ra, \rb, [sp,#-16]! ++.endm ++ ++.macro pop ra, rb ++ldp \ra, \rb, [sp], #16 ++.endm ++ ++lr .req x30 ++ ++SYM_FUNC_START(trusty_smc8) ++ /* ++ * Save x8 (return value ptr) and lr. The SMC calling convention says el3 ++ * does not need to preserve x8. The normal ABI does not require either x8 ++ * or lr to be preserved. 
++ */ ++ push x8, lr ++ smc #0 ++ pop x8, lr ++ ++ /* Copy 8-register smc return value to struct smc_ret8 return value */ ++ stp x0, x1, [x8], #16 ++ stp x2, x3, [x8], #16 ++ stp x4, x5, [x8], #16 ++ stp x6, x7, [x8], #16 ++ ++ ret ++SYM_FUNC_END(trusty_smc8) +diff --git a/drivers/trusty/trusty-smc.h b/drivers/trusty/trusty-smc.h +new file mode 100644 +index 000000000000..b53e5abb4d05 +--- /dev/null ++++ b/drivers/trusty/trusty-smc.h +@@ -0,0 +1,26 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright (C) 2020 Google, Inc. ++ */ ++#ifndef _TRUSTY_SMC_H ++#define _TRUSTY_SMC_H ++ ++#include <linux/types.h> ++ ++struct smc_ret8 { ++ unsigned long r0; ++ unsigned long r1; ++ unsigned long r2; ++ unsigned long r3; ++ unsigned long r4; ++ unsigned long r5; ++ unsigned long r6; ++ unsigned long r7; ++}; ++ ++struct smc_ret8 trusty_smc8(unsigned long r0, unsigned long r1, ++ unsigned long r2, unsigned long r3, ++ unsigned long r4, unsigned long r5, ++ unsigned long r6, unsigned long r7); ++ ++#endif /* _TRUSTY_SMC_H */ +diff --git a/drivers/trusty/trusty-test.c b/drivers/trusty/trusty-test.c +new file mode 100644 +index 000000000000..844868981fa5 +--- /dev/null ++++ b/drivers/trusty/trusty-test.c +@@ -0,0 +1,440 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2020 Google, Inc. 
++ */ ++ ++#include <linux/ctype.h> ++#include <linux/list.h> ++#include <linux/platform_device.h> ++#include <linux/trusty/smcall.h> ++#include <linux/trusty/trusty.h> ++#include <linux/scatterlist.h> ++#include <linux/slab.h> ++#include <linux/mm.h> ++#include <linux/mod_devicetable.h> ++#include <linux/module.h> ++ ++#include "trusty-test.h" ++ ++struct trusty_test_state { ++ struct device *dev; ++ struct device *trusty_dev; ++}; ++ ++struct trusty_test_shmem_obj { ++ struct list_head node; ++ size_t page_count; ++ struct page **pages; ++ void *buf; ++ struct sg_table sgt; ++ trusty_shared_mem_id_t mem_id; ++}; ++ ++/* ++ * Allocate a test object with @page_count number of pages, map it and add it to ++ * @list. ++ * For multi-page allocations, order the pages so they are not contiguous. ++ */ ++static int trusty_test_alloc_obj(struct trusty_test_state *s, ++ size_t page_count, ++ struct list_head *list) ++{ ++ size_t i; ++ int ret = -ENOMEM; ++ struct trusty_test_shmem_obj *obj; ++ ++ obj = kzalloc(sizeof(*obj), GFP_KERNEL); ++ if (!obj) ++ goto err_alloc_obj; ++ obj->page_count = page_count; ++ ++ obj->pages = kmalloc_array(page_count, sizeof(*obj->pages), GFP_KERNEL); ++ if (!obj->pages) { ++ ret = -ENOMEM; ++ dev_err(s->dev, "failed to allocate page array, count %zd\n", ++ page_count); ++ goto err_alloc_pages; ++ } ++ ++ for (i = 0; i < page_count; i++) { ++ obj->pages[i] = alloc_page(GFP_KERNEL); ++ if (!obj->pages[i]) { ++ ret = -ENOMEM; ++ dev_err(s->dev, "failed to allocate page %zd/%zd\n", ++ i, page_count); ++ goto err_alloc_page; ++ } ++ if (i > 0 && obj->pages[i - 1] + 1 == obj->pages[i]) { ++ /* swap adacent pages to increase fragmentation */ ++ swap(obj->pages[i - 1], obj->pages[i]); ++ } ++ } ++ ++ obj->buf = vmap(obj->pages, page_count, VM_MAP, PAGE_KERNEL); ++ if (!obj->buf) { ++ ret = -ENOMEM; ++ dev_err(s->dev, "failed to map test buffer page count %zd\n", ++ page_count); ++ goto err_map_pages; ++ } ++ ++ ret = 
sg_alloc_table_from_pages(&obj->sgt, obj->pages, page_count, ++ 0, page_count * PAGE_SIZE, GFP_KERNEL); ++ if (ret) { ++ dev_err(s->dev, "sg_alloc_table_from_pages failed: %d\n", ret); ++ goto err_alloc_sgt; ++ } ++ list_add_tail(&obj->node, list); ++ dev_dbg(s->dev, "buffer has %d page runs\n", obj->sgt.nents); ++ return 0; ++ ++err_alloc_sgt: ++ vunmap(obj->buf); ++err_map_pages: ++ for (i = page_count; i > 0; i--) { ++ __free_page(obj->pages[i - 1]); ++err_alloc_page: ++ ; ++ } ++ kfree(obj->pages); ++err_alloc_pages: ++ kfree(obj); ++err_alloc_obj: ++ return ret; ++} ++ ++/* Unlink, unmap and free a test object and its pages */ ++static void trusty_test_free_obj(struct trusty_test_state *s, ++ struct trusty_test_shmem_obj *obj) ++{ ++ size_t i; ++ ++ list_del(&obj->node); ++ sg_free_table(&obj->sgt); ++ vunmap(obj->buf); ++ for (i = obj->page_count; i > 0; i--) ++ __free_page(obj->pages[i - 1]); ++ kfree(obj->pages); ++ kfree(obj); ++} ++ ++/* ++ * Share all the pages of all the test object in &obj_list. ++ * If sharing a test object fails, free it so that every test object that ++ * remains in @obj_list has been shared when this function returns. ++ * Return a error if any test object failed to be shared. ++ */ ++static int trusty_test_share_objs(struct trusty_test_state *s, ++ struct list_head *obj_list, size_t size) ++{ ++ int ret = 0; ++ int tmpret; ++ struct trusty_test_shmem_obj *obj; ++ struct trusty_test_shmem_obj *next_obj; ++ ktime_t t1; ++ ktime_t t2; ++ ++ list_for_each_entry_safe(obj, next_obj, obj_list, node) { ++ t1 = ktime_get(); ++ tmpret = trusty_share_memory(s->trusty_dev, &obj->mem_id, ++ obj->sgt.sgl, obj->sgt.nents, ++ PAGE_KERNEL); ++ t2 = ktime_get(); ++ if (tmpret) { ++ ret = tmpret; ++ dev_err(s->dev, ++ "trusty_share_memory failed: %d, size=%zd\n", ++ ret, size); ++ ++ /* ++ * Free obj and continue, so we can revoke the ++ * whole list in trusty_test_reclaim_objs. 
++ */ ++ trusty_test_free_obj(s, obj); ++ } ++ dev_dbg(s->dev, "share id=0x%llx, size=%zu took %lld ns\n", ++ obj->mem_id, size, ++ ktime_to_ns(ktime_sub(t2, t1))); ++ } ++ ++ return ret; ++} ++ ++/* Reclaim memory shared with trusty for all test objects in @obj_list. */ ++static int trusty_test_reclaim_objs(struct trusty_test_state *s, ++ struct list_head *obj_list, size_t size) ++{ ++ int ret = 0; ++ int tmpret; ++ struct trusty_test_shmem_obj *obj; ++ struct trusty_test_shmem_obj *next_obj; ++ ktime_t t1; ++ ktime_t t2; ++ ++ list_for_each_entry_safe(obj, next_obj, obj_list, node) { ++ t1 = ktime_get(); ++ tmpret = trusty_reclaim_memory(s->trusty_dev, obj->mem_id, ++ obj->sgt.sgl, obj->sgt.nents); ++ t2 = ktime_get(); ++ if (tmpret) { ++ ret = tmpret; ++ dev_err(s->dev, ++ "trusty_reclaim_memory failed: %d, id=0x%llx\n", ++ ret, obj->mem_id); ++ ++ /* ++ * It is not safe to free this memory if ++ * trusty_reclaim_memory fails. Leak it in that ++ * case. ++ */ ++ list_del(&obj->node); ++ } ++ dev_dbg(s->dev, "revoke id=0x%llx, size=%zu took %lld ns\n", ++ obj->mem_id, size, ++ ktime_to_ns(ktime_sub(t2, t1))); ++ } ++ ++ return ret; ++} ++ ++/* ++ * Test a test object. First, initialize the memory, then make a std call into ++ * trusty which will read it and return an error if the initialized value does ++ * not match what it expects. If trusty reads the correct values, it will modify ++ * the memory and return 0. This function then checks that it can read the ++ * correct modified value. 
++ */ ++static int trusty_test_rw(struct trusty_test_state *s, ++ struct trusty_test_shmem_obj *obj) ++{ ++ size_t size = obj->page_count * PAGE_SIZE; ++ int ret; ++ size_t i; ++ u64 *buf = obj->buf; ++ ktime_t t1; ++ ktime_t t2; ++ ++ for (i = 0; i < size / sizeof(*buf); i++) ++ buf[i] = i; ++ ++ t1 = ktime_get(); ++ ret = trusty_std_call32(s->trusty_dev, SMC_SC_TEST_SHARED_MEM_RW, ++ (u32)(obj->mem_id), (u32)(obj->mem_id >> 32), ++ size); ++ t2 = ktime_get(); ++ if (ret < 0) { ++ dev_err(s->dev, ++ "trusty std call (SMC_SC_TEST_SHARED_MEM_RW) failed: %d 0x%llx\n", ++ ret, obj->mem_id); ++ return ret; ++ } ++ ++ for (i = 0; i < size / sizeof(*buf); i++) { ++ if (buf[i] != size - i) { ++ dev_err(s->dev, ++ "input mismatch at %zd, got 0x%llx instead of 0x%zx\n", ++ i, buf[i], size - i); ++ return -EIO; ++ } ++ } ++ ++ dev_dbg(s->dev, "rw id=0x%llx, size=%zu took %lld ns\n", obj->mem_id, ++ size, ktime_to_ns(ktime_sub(t2, t1))); ++ ++ return 0; ++} ++ ++/* ++ * Run test on every test object in @obj_list. Repeat @repeat_access times. ++ */ ++static int trusty_test_rw_objs(struct trusty_test_state *s, ++ struct list_head *obj_list, ++ size_t repeat_access) ++{ ++ int ret; ++ size_t i; ++ struct trusty_test_shmem_obj *obj; ++ ++ for (i = 0; i < repeat_access; i++) { ++ /* ++ * Repeat test in case the memory attributes don't match ++ * and either side see old data. ++ */ ++ list_for_each_entry(obj, obj_list, node) { ++ ret = trusty_test_rw(s, obj); ++ if (ret) ++ return ret; ++ } ++ } ++ ++ return 0; ++} ++ ++/* ++ * Allocate @obj_count test object that each have @page_count pages. Share each ++ * object @repeat_share times, each time running tests on every object ++ * @repeat_access times. 
++ */ ++static int trusty_test_run(struct trusty_test_state *s, size_t page_count, ++ size_t obj_count, size_t repeat_share, ++ size_t repeat_access) ++{ ++ int ret = 0; ++ int tmpret; ++ size_t i; ++ size_t size = page_count * PAGE_SIZE; ++ LIST_HEAD(obj_list); ++ struct trusty_test_shmem_obj *obj; ++ struct trusty_test_shmem_obj *next_obj; ++ ++ for (i = 0; i < obj_count && !ret; i++) ++ ret = trusty_test_alloc_obj(s, page_count, &obj_list); ++ ++ for (i = 0; i < repeat_share && !ret; i++) { ++ ret = trusty_test_share_objs(s, &obj_list, size); ++ if (ret) { ++ dev_err(s->dev, ++ "trusty_share_memory failed: %d, i=%zd/%zd, size=%zd\n", ++ ret, i, repeat_share, size); ++ } else { ++ ret = trusty_test_rw_objs(s, &obj_list, repeat_access); ++ if (ret) ++ dev_err(s->dev, ++ "test failed: %d, i=%zd/%zd, size=%zd\n", ++ ret, i, repeat_share, size); ++ } ++ tmpret = trusty_test_reclaim_objs(s, &obj_list, size); ++ if (tmpret) { ++ ret = tmpret; ++ dev_err(s->dev, ++ "trusty_reclaim_memory failed: %d, i=%zd/%zd\n", ++ ret, i, repeat_share); ++ } ++ } ++ ++ list_for_each_entry_safe(obj, next_obj, &obj_list, node) ++ trusty_test_free_obj(s, obj); ++ ++ dev_info(s->dev, "[ %s ] size %zd, obj_count %zd, repeat_share %zd, repeat_access %zd\n", ++ ret ? "FAILED" : "PASSED", size, obj_count, repeat_share, ++ repeat_access); ++ ++ return ret; ++} ++ ++/* ++ * Get an optional numeric argument from @buf, update @buf and return the value. ++ * If @buf does not start with ",", return @default_val instead. 
++ */ ++static size_t trusty_test_get_arg(const char **buf, size_t default_val) ++{ ++ char *buf_next; ++ size_t ret; ++ ++ if (**buf != ',') ++ return default_val; ++ ++ (*buf)++; ++ ret = simple_strtoul(*buf, &buf_next, 0); ++ if (buf_next == *buf) ++ return default_val; ++ ++ *buf = buf_next; ++ ++ return ret; ++} ++ ++/* ++ * Run tests described by a string in this format: ++ * <obj_size>,<obj_count=1>,<repeat_share=1>,<repeat_access=3> ++ */ ++static ssize_t trusty_test_run_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ struct trusty_test_state *s = platform_get_drvdata(pdev); ++ size_t size; ++ size_t obj_count; ++ size_t repeat_share; ++ size_t repeat_access; ++ int ret; ++ char *buf_next; ++ ++ while (true) { ++ while (isspace(*buf)) ++ buf++; ++ size = simple_strtoul(buf, &buf_next, 0); ++ if (buf_next == buf) ++ return count; ++ buf = buf_next; ++ obj_count = trusty_test_get_arg(&buf, 1); ++ repeat_share = trusty_test_get_arg(&buf, 1); ++ repeat_access = trusty_test_get_arg(&buf, 3); ++ ++ ret = trusty_test_run(s, DIV_ROUND_UP(size, PAGE_SIZE), ++ obj_count, repeat_share, repeat_access); ++ if (ret) ++ return ret; ++ } ++} ++ ++static DEVICE_ATTR_WO(trusty_test_run); ++ ++static struct attribute *trusty_test_attrs[] = { ++ &dev_attr_trusty_test_run.attr, ++ NULL, ++}; ++ATTRIBUTE_GROUPS(trusty_test); ++ ++static int trusty_test_probe(struct platform_device *pdev) ++{ ++ struct trusty_test_state *s; ++ int ret; ++ ++ ret = trusty_std_call32(pdev->dev.parent, SMC_SC_TEST_VERSION, ++ TRUSTY_STDCALLTEST_API_VERSION, 0, 0); ++ if (ret != TRUSTY_STDCALLTEST_API_VERSION) ++ return -ENOENT; ++ ++ s = kzalloc(sizeof(*s), GFP_KERNEL); ++ if (!s) ++ return -ENOMEM; ++ ++ s->dev = &pdev->dev; ++ s->trusty_dev = s->dev->parent; ++ ++ platform_set_drvdata(pdev, s); ++ ++ return 0; ++} ++ ++static int trusty_test_remove(struct platform_device *pdev) ++{ ++ 
struct trusty_test_state *s = platform_get_drvdata(pdev);
++
++	kfree(s);
++	return 0;
++}
++
++static const struct of_device_id trusty_test_of_match[] = {
++	{ .compatible = "android,trusty-test-v1", },
++	{},
++};
++
++MODULE_DEVICE_TABLE(of, trusty_test_of_match);
++
++static struct platform_driver trusty_test_driver = {
++	.probe = trusty_test_probe,
++	.remove = trusty_test_remove,
++	.driver = {
++		.name = "trusty-test",
++		.of_match_table = trusty_test_of_match,
++		.dev_groups = trusty_test_groups,
++	},
++};
++
++module_platform_driver(trusty_test_driver);
++
++MODULE_LICENSE("GPL v2");
++MODULE_DESCRIPTION("Trusty test driver");
+diff --git a/drivers/trusty/trusty-test.h b/drivers/trusty/trusty-test.h
+new file mode 100644
+index 000000000000..eea7beb96876
+--- /dev/null
++++ b/drivers/trusty/trusty-test.h
+@@ -0,0 +1,13 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Copyright (c) 2020 Google, Inc.
++ */
++#ifndef _TRUSTY_TEST_H
++#define _TRUSTY_TEST_H
++
++#define SMC_SC_TEST_VERSION	SMC_STDCALL_NR(SMC_ENTITY_TEST, 0)
++#define SMC_SC_TEST_SHARED_MEM_RW	SMC_STDCALL_NR(SMC_ENTITY_TEST, 1)
++
++#define TRUSTY_STDCALLTEST_API_VERSION	1
++
++#endif /* _TRUSTY_TEST_H */
+diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c
+new file mode 100644
+index 000000000000..fea59cd2e218
+--- /dev/null
++++ b/drivers/trusty/trusty-virtio.c
+@@ -0,0 +1,840 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Trusty Virtio driver
++ *
++ * Copyright (C) 2015 Google, Inc.
++ */ ++#include <linux/device.h> ++#include <linux/err.h> ++#include <linux/kernel.h> ++ ++#include <linux/dma-map-ops.h> ++#include <linux/module.h> ++#include <linux/mutex.h> ++#include <linux/notifier.h> ++#include <linux/workqueue.h> ++#include <linux/remoteproc.h> ++#include <linux/slab.h> ++ ++#include <linux/platform_device.h> ++#include <linux/trusty/smcall.h> ++#include <linux/trusty/trusty.h> ++#include <linux/trusty/trusty_ipc.h> ++ ++#include <linux/virtio.h> ++#include <linux/virtio_config.h> ++#include <linux/virtio_ids.h> ++#include <linux/virtio_ring.h> ++ ++#include <linux/atomic.h> ++ ++#define RSC_DESCR_VER 1 ++ ++struct trusty_vdev; ++ ++struct trusty_ctx { ++ struct device *dev; ++ void *shared_va; ++ struct scatterlist shared_sg; ++ trusty_shared_mem_id_t shared_id; ++ size_t shared_sz; ++ struct work_struct check_vqs; ++ struct work_struct kick_vqs; ++ struct notifier_block call_notifier; ++ struct list_head vdev_list; ++ struct mutex mlock; /* protects vdev_list */ ++ struct workqueue_struct *kick_wq; ++ struct workqueue_struct *check_wq; ++}; ++ ++struct trusty_vring { ++ void *vaddr; ++ struct scatterlist sg; ++ trusty_shared_mem_id_t shared_mem_id; ++ size_t size; ++ unsigned int align; ++ unsigned int elem_num; ++ u32 notifyid; ++ atomic_t needs_kick; ++ struct fw_rsc_vdev_vring *vr_descr; ++ struct virtqueue *vq; ++ struct trusty_vdev *tvdev; ++ struct trusty_nop kick_nop; ++}; ++ ++struct trusty_vdev { ++ struct list_head node; ++ struct virtio_device vdev; ++ struct trusty_ctx *tctx; ++ u32 notifyid; ++ unsigned int config_len; ++ void *config; ++ struct fw_rsc_vdev *vdev_descr; ++ unsigned int vring_num; ++ struct trusty_vring vrings[]; ++}; ++ ++#define vdev_to_tvdev(vd) container_of((vd), struct trusty_vdev, vdev) ++ ++static void check_all_vqs(struct work_struct *work) ++{ ++ unsigned int i; ++ struct trusty_ctx *tctx = container_of(work, struct trusty_ctx, ++ check_vqs); ++ struct trusty_vdev *tvdev; ++ ++ 
list_for_each_entry(tvdev, &tctx->vdev_list, node) { ++ for (i = 0; i < tvdev->vring_num; i++) ++ if (tvdev->vrings[i].vq) ++ vring_interrupt(0, tvdev->vrings[i].vq); ++ } ++} ++ ++static int trusty_call_notify(struct notifier_block *nb, ++ unsigned long action, void *data) ++{ ++ struct trusty_ctx *tctx; ++ ++ if (action != TRUSTY_CALL_RETURNED) ++ return NOTIFY_DONE; ++ ++ tctx = container_of(nb, struct trusty_ctx, call_notifier); ++ queue_work(tctx->check_wq, &tctx->check_vqs); ++ ++ return NOTIFY_OK; ++} ++ ++static void kick_vq(struct trusty_ctx *tctx, ++ struct trusty_vdev *tvdev, ++ struct trusty_vring *tvr) ++{ ++ int ret; ++ ++ dev_dbg(tctx->dev, "%s: vdev_id=%d: vq_id=%d\n", ++ __func__, tvdev->notifyid, tvr->notifyid); ++ ++ ret = trusty_std_call32(tctx->dev->parent, SMC_SC_VDEV_KICK_VQ, ++ tvdev->notifyid, tvr->notifyid, 0); ++ if (ret) { ++ dev_err(tctx->dev, "vq notify (%d, %d) returned %d\n", ++ tvdev->notifyid, tvr->notifyid, ret); ++ } ++} ++ ++static void kick_vqs(struct work_struct *work) ++{ ++ unsigned int i; ++ struct trusty_vdev *tvdev; ++ struct trusty_ctx *tctx = container_of(work, struct trusty_ctx, ++ kick_vqs); ++ mutex_lock(&tctx->mlock); ++ list_for_each_entry(tvdev, &tctx->vdev_list, node) { ++ for (i = 0; i < tvdev->vring_num; i++) { ++ struct trusty_vring *tvr = &tvdev->vrings[i]; ++ ++ if (atomic_xchg(&tvr->needs_kick, 0)) ++ kick_vq(tctx, tvdev, tvr); ++ } ++ } ++ mutex_unlock(&tctx->mlock); ++} ++ ++static bool trusty_virtio_notify(struct virtqueue *vq) ++{ ++ struct trusty_vring *tvr = vq->priv; ++ struct trusty_vdev *tvdev = tvr->tvdev; ++ struct trusty_ctx *tctx = tvdev->tctx; ++ u32 api_ver = trusty_get_api_version(tctx->dev->parent); ++ ++ if (api_ver < TRUSTY_API_VERSION_SMP_NOP) { ++ atomic_set(&tvr->needs_kick, 1); ++ queue_work(tctx->kick_wq, &tctx->kick_vqs); ++ } else { ++ trusty_enqueue_nop(tctx->dev->parent, &tvr->kick_nop); ++ } ++ ++ return true; ++} ++ ++static int trusty_load_device_descr(struct trusty_ctx *tctx, 
++ trusty_shared_mem_id_t id, size_t sz) ++{ ++ int ret; ++ ++ dev_dbg(tctx->dev, "%s: %zu bytes @ id %llu\n", __func__, sz, id); ++ ++ ret = trusty_std_call32(tctx->dev->parent, SMC_SC_VIRTIO_GET_DESCR, ++ (u32)id, id >> 32, sz); ++ if (ret < 0) { ++ dev_err(tctx->dev, "%s: virtio get descr returned (%d)\n", ++ __func__, ret); ++ return -ENODEV; ++ } ++ return ret; ++} ++ ++static void trusty_virtio_stop(struct trusty_ctx *tctx, ++ trusty_shared_mem_id_t id, size_t sz) ++{ ++ int ret; ++ ++ dev_dbg(tctx->dev, "%s: %zu bytes @ id %llu\n", __func__, sz, id); ++ ++ ret = trusty_std_call32(tctx->dev->parent, SMC_SC_VIRTIO_STOP, ++ (u32)id, id >> 32, sz); ++ if (ret) { ++ dev_err(tctx->dev, "%s: virtio done returned (%d)\n", ++ __func__, ret); ++ return; ++ } ++} ++ ++static int trusty_virtio_start(struct trusty_ctx *tctx, ++ trusty_shared_mem_id_t id, size_t sz) ++{ ++ int ret; ++ ++ dev_dbg(tctx->dev, "%s: %zu bytes @ id %llu\n", __func__, sz, id); ++ ++ ret = trusty_std_call32(tctx->dev->parent, SMC_SC_VIRTIO_START, ++ (u32)id, id >> 32, sz); ++ if (ret) { ++ dev_err(tctx->dev, "%s: virtio start returned (%d)\n", ++ __func__, ret); ++ return -ENODEV; ++ } ++ return 0; ++} ++ ++static void trusty_virtio_reset(struct virtio_device *vdev) ++{ ++ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); ++ struct trusty_ctx *tctx = tvdev->tctx; ++ ++ dev_dbg(&vdev->dev, "reset vdev_id=%d\n", tvdev->notifyid); ++ trusty_std_call32(tctx->dev->parent, SMC_SC_VDEV_RESET, ++ tvdev->notifyid, 0, 0); ++} ++ ++static u64 trusty_virtio_get_features(struct virtio_device *vdev) ++{ ++ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); ++ ++ return tvdev->vdev_descr->dfeatures | ++ (1ULL << VIRTIO_F_ACCESS_PLATFORM); ++} ++ ++static int trusty_virtio_finalize_features(struct virtio_device *vdev) ++{ ++ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); ++ u64 features = vdev->features; ++ ++ /* ++ * We set VIRTIO_F_ACCESS_PLATFORM to enable the dma mapping hooks. 
++ * The other side does not need to know. ++ */ ++ features &= ~(1ULL << VIRTIO_F_ACCESS_PLATFORM); ++ ++ /* Make sure we don't have any features > 32 bits! */ ++ if (WARN_ON((u32)vdev->features != features)) ++ return -EINVAL; ++ ++ tvdev->vdev_descr->gfeatures = vdev->features; ++ return 0; ++} ++ ++static void trusty_virtio_get_config(struct virtio_device *vdev, ++ unsigned int offset, void *buf, ++ unsigned int len) ++{ ++ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); ++ ++ dev_dbg(&vdev->dev, "%s: %d bytes @ offset %d\n", ++ __func__, len, offset); ++ ++ if (tvdev->config) { ++ if (offset + len <= tvdev->config_len) ++ memcpy(buf, tvdev->config + offset, len); ++ } ++} ++ ++static void trusty_virtio_set_config(struct virtio_device *vdev, ++ unsigned int offset, const void *buf, ++ unsigned int len) ++{ ++} ++ ++static u8 trusty_virtio_get_status(struct virtio_device *vdev) ++{ ++ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); ++ ++ return tvdev->vdev_descr->status; ++} ++ ++static void trusty_virtio_set_status(struct virtio_device *vdev, u8 status) ++{ ++ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); ++ ++ tvdev->vdev_descr->status = status; ++} ++ ++static void _del_vqs(struct virtio_device *vdev) ++{ ++ unsigned int i; ++ int ret; ++ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); ++ struct trusty_vring *tvr = &tvdev->vrings[0]; ++ ++ for (i = 0; i < tvdev->vring_num; i++, tvr++) { ++ /* dequeue kick_nop */ ++ trusty_dequeue_nop(tvdev->tctx->dev->parent, &tvr->kick_nop); ++ ++ /* delete vq */ ++ if (tvr->vq) { ++ vring_del_virtqueue(tvr->vq); ++ tvr->vq = NULL; ++ } ++ /* delete vring */ ++ if (tvr->vaddr) { ++ ret = trusty_reclaim_memory(tvdev->tctx->dev->parent, ++ tvr->shared_mem_id, ++ &tvr->sg, 1); ++ if (WARN_ON(ret)) { ++ dev_err(&vdev->dev, ++ "trusty_revoke_memory failed: %d 0x%llx\n", ++ ret, tvr->shared_mem_id); ++ /* ++ * It is not safe to free this memory if ++ * trusty_revoke_memory fails. Leak it in that ++ * case. 
++ */ ++ } else { ++ free_pages_exact(tvr->vaddr, tvr->size); ++ } ++ tvr->vaddr = NULL; ++ } ++ } ++} ++ ++static void trusty_virtio_del_vqs(struct virtio_device *vdev) ++{ ++ _del_vqs(vdev); ++} ++ ++ ++static struct virtqueue *_find_vq(struct virtio_device *vdev, ++ unsigned int id, ++ void (*callback)(struct virtqueue *vq), ++ const char *name, ++ bool ctx) ++{ ++ struct trusty_vring *tvr; ++ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); ++ phys_addr_t pa; ++ int ret; ++ ++ if (!name) ++ return ERR_PTR(-EINVAL); ++ ++ if (id >= tvdev->vring_num) ++ return ERR_PTR(-EINVAL); ++ ++ tvr = &tvdev->vrings[id]; ++ ++ /* actual size of vring (in bytes) */ ++ tvr->size = PAGE_ALIGN(vring_size(tvr->elem_num, tvr->align)); ++ ++ /* allocate memory for the vring. */ ++ tvr->vaddr = alloc_pages_exact(tvr->size, GFP_KERNEL | __GFP_ZERO); ++ if (!tvr->vaddr) { ++ dev_err(&vdev->dev, "vring alloc failed\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ sg_init_one(&tvr->sg, tvr->vaddr, tvr->size); ++ ret = trusty_share_memory_compat(tvdev->tctx->dev->parent, ++ &tvr->shared_mem_id, &tvr->sg, 1, ++ PAGE_KERNEL); ++ if (ret) { ++ pa = virt_to_phys(tvr->vaddr); ++ dev_err(&vdev->dev, "trusty_share_memory failed: %d %pa\n", ++ ret, &pa); ++ goto err_share_memory; ++ } ++ ++ /* save vring address to shared structure */ ++ tvr->vr_descr->da = (u32)tvr->shared_mem_id; ++ ++ /* da field is only 32 bit wide. 
Use previously unused 'reserved' field ++ * to store top 32 bits of 64-bit shared_mem_id ++ */ ++ tvr->vr_descr->pa = (u32)(tvr->shared_mem_id >> 32); ++ ++ dev_info(&vdev->dev, "vring%d: va(id) %p(%llx) qsz %d notifyid %d\n", ++ id, tvr->vaddr, (u64)tvr->shared_mem_id, tvr->elem_num, ++ tvr->notifyid); ++ ++ tvr->vq = vring_new_virtqueue(id, tvr->elem_num, tvr->align, ++ vdev, true, ctx, tvr->vaddr, ++ trusty_virtio_notify, callback, name); ++ if (!tvr->vq) { ++ dev_err(&vdev->dev, "vring_new_virtqueue %s failed\n", ++ name); ++ goto err_new_virtqueue; ++ } ++ ++ tvr->vq->priv = tvr; ++ ++ return tvr->vq; ++ ++err_new_virtqueue: ++ ret = trusty_reclaim_memory(tvdev->tctx->dev->parent, ++ tvr->shared_mem_id, &tvr->sg, 1); ++ if (WARN_ON(ret)) { ++ dev_err(&vdev->dev, "trusty_revoke_memory failed: %d 0x%llx\n", ++ ret, tvr->shared_mem_id); ++ /* ++ * It is not safe to free this memory if trusty_revoke_memory ++ * fails. Leak it in that case. ++ */ ++ } else { ++err_share_memory: ++ free_pages_exact(tvr->vaddr, tvr->size); ++ } ++ tvr->vaddr = NULL; ++ return ERR_PTR(-ENOMEM); ++} ++ ++static int trusty_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs, ++ struct virtqueue *vqs[], ++ vq_callback_t *callbacks[], ++ const char * const names[], ++ const bool *ctxs, ++ struct irq_affinity *desc) ++{ ++ unsigned int i; ++ int ret; ++ bool ctx = false; ++ ++ for (i = 0; i < nvqs; i++) { ++ ctx = false; ++ if (ctxs) ++ ctx = ctxs[i]; ++ vqs[i] = _find_vq(vdev, i, callbacks[i], names[i], ctx); ++ if (IS_ERR(vqs[i])) { ++ ret = PTR_ERR(vqs[i]); ++ _del_vqs(vdev); ++ return ret; ++ } ++ } ++ return 0; ++} ++ ++static const char *trusty_virtio_bus_name(struct virtio_device *vdev) ++{ ++ return "trusty-virtio"; ++} ++ ++/* The ops structure which hooks everything together. 
*/ ++static const struct virtio_config_ops trusty_virtio_config_ops = { ++ .get_features = trusty_virtio_get_features, ++ .finalize_features = trusty_virtio_finalize_features, ++ .get = trusty_virtio_get_config, ++ .set = trusty_virtio_set_config, ++ .get_status = trusty_virtio_get_status, ++ .set_status = trusty_virtio_set_status, ++ .reset = trusty_virtio_reset, ++ .find_vqs = trusty_virtio_find_vqs, ++ .del_vqs = trusty_virtio_del_vqs, ++ .bus_name = trusty_virtio_bus_name, ++}; ++ ++static int trusty_virtio_add_device(struct trusty_ctx *tctx, ++ struct fw_rsc_vdev *vdev_descr, ++ struct fw_rsc_vdev_vring *vr_descr, ++ void *config) ++{ ++ int i, ret; ++ struct trusty_vdev *tvdev; ++ ++ tvdev = kzalloc(struct_size(tvdev, vrings, vdev_descr->num_of_vrings), ++ GFP_KERNEL); ++ if (!tvdev) ++ return -ENOMEM; ++ ++ /* setup vdev */ ++ tvdev->tctx = tctx; ++ tvdev->vdev.dev.parent = tctx->dev; ++ tvdev->vdev.id.device = vdev_descr->id; ++ tvdev->vdev.config = &trusty_virtio_config_ops; ++ tvdev->vdev_descr = vdev_descr; ++ tvdev->notifyid = vdev_descr->notifyid; ++ ++ /* setup config */ ++ tvdev->config = config; ++ tvdev->config_len = vdev_descr->config_len; ++ ++ /* setup vrings and vdev resource */ ++ tvdev->vring_num = vdev_descr->num_of_vrings; ++ ++ for (i = 0; i < tvdev->vring_num; i++, vr_descr++) { ++ struct trusty_vring *tvr = &tvdev->vrings[i]; ++ ++ tvr->tvdev = tvdev; ++ tvr->vr_descr = vr_descr; ++ tvr->align = vr_descr->align; ++ tvr->elem_num = vr_descr->num; ++ tvr->notifyid = vr_descr->notifyid; ++ trusty_nop_init(&tvr->kick_nop, SMC_NC_VDEV_KICK_VQ, ++ tvdev->notifyid, tvr->notifyid); ++ } ++ ++ /* register device */ ++ ret = register_virtio_device(&tvdev->vdev); ++ if (ret) { ++ dev_err(tctx->dev, ++ "Failed (%d) to register device dev type %u\n", ++ ret, vdev_descr->id); ++ goto err_register; ++ } ++ ++ /* add it to tracking list */ ++ list_add_tail(&tvdev->node, &tctx->vdev_list); ++ ++ return 0; ++ ++err_register: ++ kfree(tvdev); ++ return 
ret; ++} ++ ++static int trusty_parse_device_descr(struct trusty_ctx *tctx, ++ void *descr_va, size_t descr_sz) ++{ ++ u32 i; ++ struct resource_table *descr = descr_va; ++ ++ if (descr_sz < sizeof(*descr)) { ++ dev_err(tctx->dev, "descr table is too small (0x%x)\n", ++ (int)descr_sz); ++ return -ENODEV; ++ } ++ ++ if (descr->ver != RSC_DESCR_VER) { ++ dev_err(tctx->dev, "unexpected descr ver (0x%x)\n", ++ (int)descr->ver); ++ return -ENODEV; ++ } ++ ++ if (descr_sz < (sizeof(*descr) + descr->num * sizeof(u32))) { ++ dev_err(tctx->dev, "descr table is too small (0x%x)\n", ++ (int)descr->ver); ++ return -ENODEV; ++ } ++ ++ for (i = 0; i < descr->num; i++) { ++ struct fw_rsc_hdr *hdr; ++ struct fw_rsc_vdev *vd; ++ struct fw_rsc_vdev_vring *vr; ++ void *cfg; ++ size_t vd_sz; ++ ++ u32 offset = descr->offset[i]; ++ ++ if (offset >= descr_sz) { ++ dev_err(tctx->dev, "offset is out of bounds (%u)\n", ++ offset); ++ return -ENODEV; ++ } ++ ++ /* check space for rsc header */ ++ if ((descr_sz - offset) < sizeof(struct fw_rsc_hdr)) { ++ dev_err(tctx->dev, "no space for rsc header (%u)\n", ++ offset); ++ return -ENODEV; ++ } ++ hdr = (struct fw_rsc_hdr *)((u8 *)descr + offset); ++ offset += sizeof(struct fw_rsc_hdr); ++ ++ /* check type */ ++ if (hdr->type != RSC_VDEV) { ++ dev_err(tctx->dev, "unsupported rsc type (%u)\n", ++ hdr->type); ++ continue; ++ } ++ ++ /* got vdev: check space for vdev */ ++ if ((descr_sz - offset) < sizeof(struct fw_rsc_vdev)) { ++ dev_err(tctx->dev, "no space for vdev descr (%u)\n", ++ offset); ++ return -ENODEV; ++ } ++ vd = (struct fw_rsc_vdev *)((u8 *)descr + offset); ++ ++ /* check space for vrings and config area */ ++ vd_sz = sizeof(struct fw_rsc_vdev) + ++ vd->num_of_vrings * sizeof(struct fw_rsc_vdev_vring) + ++ vd->config_len; ++ ++ if ((descr_sz - offset) < vd_sz) { ++ dev_err(tctx->dev, "no space for vdev (%u)\n", offset); ++ return -ENODEV; ++ } ++ vr = (struct fw_rsc_vdev_vring *)vd->vring; ++ cfg = (void *)(vr + vd->num_of_vrings); 
++ ++ trusty_virtio_add_device(tctx, vd, vr, cfg); ++ } ++ ++ return 0; ++} ++ ++static void _remove_devices_locked(struct trusty_ctx *tctx) ++{ ++ struct trusty_vdev *tvdev, *next; ++ ++ list_for_each_entry_safe(tvdev, next, &tctx->vdev_list, node) { ++ list_del(&tvdev->node); ++ unregister_virtio_device(&tvdev->vdev); ++ kfree(tvdev); ++ } ++} ++ ++static void trusty_virtio_remove_devices(struct trusty_ctx *tctx) ++{ ++ mutex_lock(&tctx->mlock); ++ _remove_devices_locked(tctx); ++ mutex_unlock(&tctx->mlock); ++} ++ ++static int trusty_virtio_add_devices(struct trusty_ctx *tctx) ++{ ++ int ret; ++ int ret_tmp; ++ void *descr_va; ++ trusty_shared_mem_id_t descr_id; ++ size_t descr_sz; ++ size_t descr_buf_sz; ++ ++ /* allocate buffer to load device descriptor into */ ++ descr_buf_sz = PAGE_SIZE; ++ descr_va = alloc_pages_exact(descr_buf_sz, GFP_KERNEL | __GFP_ZERO); ++ if (!descr_va) { ++ dev_err(tctx->dev, "Failed to allocate shared area\n"); ++ return -ENOMEM; ++ } ++ ++ sg_init_one(&tctx->shared_sg, descr_va, descr_buf_sz); ++ ret = trusty_share_memory(tctx->dev->parent, &descr_id, ++ &tctx->shared_sg, 1, PAGE_KERNEL); ++ if (ret) { ++ dev_err(tctx->dev, "trusty_share_memory failed: %d\n", ret); ++ goto err_share_memory; ++ } ++ ++ /* load device descriptors */ ++ ret = trusty_load_device_descr(tctx, descr_id, descr_buf_sz); ++ if (ret < 0) { ++ dev_err(tctx->dev, "failed (%d) to load device descr\n", ret); ++ goto err_load_descr; ++ } ++ ++ descr_sz = (size_t)ret; ++ ++ mutex_lock(&tctx->mlock); ++ ++ /* parse device descriptor and add virtio devices */ ++ ret = trusty_parse_device_descr(tctx, descr_va, descr_sz); ++ if (ret) { ++ dev_err(tctx->dev, "failed (%d) to parse device descr\n", ret); ++ goto err_parse_descr; ++ } ++ ++ /* register call notifier */ ++ ret = trusty_call_notifier_register(tctx->dev->parent, ++ &tctx->call_notifier); ++ if (ret) { ++ dev_err(tctx->dev, "%s: failed (%d) to register notifier\n", ++ __func__, ret); ++ goto 
err_register_notifier; ++ } ++ ++ /* start virtio */ ++ ret = trusty_virtio_start(tctx, descr_id, descr_sz); ++ if (ret) { ++ dev_err(tctx->dev, "failed (%d) to start virtio\n", ret); ++ goto err_start_virtio; ++ } ++ ++ /* attach shared area */ ++ tctx->shared_va = descr_va; ++ tctx->shared_id = descr_id; ++ tctx->shared_sz = descr_buf_sz; ++ ++ mutex_unlock(&tctx->mlock); ++ ++ return 0; ++ ++err_start_virtio: ++ trusty_call_notifier_unregister(tctx->dev->parent, ++ &tctx->call_notifier); ++ cancel_work_sync(&tctx->check_vqs); ++err_register_notifier: ++err_parse_descr: ++ _remove_devices_locked(tctx); ++ mutex_unlock(&tctx->mlock); ++ cancel_work_sync(&tctx->kick_vqs); ++ trusty_virtio_stop(tctx, descr_id, descr_sz); ++err_load_descr: ++ ret_tmp = trusty_reclaim_memory(tctx->dev->parent, descr_id, ++ &tctx->shared_sg, 1); ++ if (WARN_ON(ret_tmp)) { ++ dev_err(tctx->dev, "trusty_revoke_memory failed: %d 0x%llx\n", ++ ret_tmp, tctx->shared_id); ++ /* ++ * It is not safe to free this memory if trusty_revoke_memory ++ * fails. Leak it in that case. 
++ */ ++ } else { ++err_share_memory: ++ free_pages_exact(descr_va, descr_buf_sz); ++ } ++ return ret; ++} ++ ++static dma_addr_t trusty_virtio_dma_map_page(struct device *dev, ++ struct page *page, ++ unsigned long offset, size_t size, ++ enum dma_data_direction dir, ++ unsigned long attrs) ++{ ++ struct tipc_msg_buf *buf = page_to_virt(page) + offset; ++ ++ return buf->buf_id; ++} ++ ++static const struct dma_map_ops trusty_virtio_dma_map_ops = { ++ .map_page = trusty_virtio_dma_map_page, ++}; ++ ++static int trusty_virtio_probe(struct platform_device *pdev) ++{ ++ int ret; ++ struct trusty_ctx *tctx; ++ ++ tctx = kzalloc(sizeof(*tctx), GFP_KERNEL); ++ if (!tctx) ++ return -ENOMEM; ++ ++ tctx->dev = &pdev->dev; ++ tctx->call_notifier.notifier_call = trusty_call_notify; ++ mutex_init(&tctx->mlock); ++ INIT_LIST_HEAD(&tctx->vdev_list); ++ INIT_WORK(&tctx->check_vqs, check_all_vqs); ++ INIT_WORK(&tctx->kick_vqs, kick_vqs); ++ platform_set_drvdata(pdev, tctx); ++ ++ set_dma_ops(&pdev->dev, &trusty_virtio_dma_map_ops); ++ ++ tctx->check_wq = alloc_workqueue("trusty-check-wq", WQ_UNBOUND, 0); ++ if (!tctx->check_wq) { ++ ret = -ENODEV; ++ dev_err(&pdev->dev, "Failed create trusty-check-wq\n"); ++ goto err_create_check_wq; ++ } ++ ++ tctx->kick_wq = alloc_workqueue("trusty-kick-wq", ++ WQ_UNBOUND | WQ_CPU_INTENSIVE, 0); ++ if (!tctx->kick_wq) { ++ ret = -ENODEV; ++ dev_err(&pdev->dev, "Failed create trusty-kick-wq\n"); ++ goto err_create_kick_wq; ++ } ++ ++ ret = trusty_virtio_add_devices(tctx); ++ if (ret) { ++ dev_err(&pdev->dev, "Failed to add virtio devices\n"); ++ goto err_add_devices; ++ } ++ ++ dev_info(&pdev->dev, "initializing done\n"); ++ return 0; ++ ++err_add_devices: ++ destroy_workqueue(tctx->kick_wq); ++err_create_kick_wq: ++ destroy_workqueue(tctx->check_wq); ++err_create_check_wq: ++ kfree(tctx); ++ return ret; ++} ++ ++static int trusty_virtio_remove(struct platform_device *pdev) ++{ ++ struct trusty_ctx *tctx = platform_get_drvdata(pdev); ++ int ret; 
++ ++ /* unregister call notifier and wait until workqueue is done */ ++ trusty_call_notifier_unregister(tctx->dev->parent, ++ &tctx->call_notifier); ++ cancel_work_sync(&tctx->check_vqs); ++ ++ /* remove virtio devices */ ++ trusty_virtio_remove_devices(tctx); ++ cancel_work_sync(&tctx->kick_vqs); ++ ++ /* destroy workqueues */ ++ destroy_workqueue(tctx->kick_wq); ++ destroy_workqueue(tctx->check_wq); ++ ++ /* notify remote that shared area goes away */ ++ trusty_virtio_stop(tctx, tctx->shared_id, tctx->shared_sz); ++ ++ /* free shared area */ ++ ret = trusty_reclaim_memory(tctx->dev->parent, tctx->shared_id, ++ &tctx->shared_sg, 1); ++ if (WARN_ON(ret)) { ++ dev_err(tctx->dev, "trusty_revoke_memory failed: %d 0x%llx\n", ++ ret, tctx->shared_id); ++ /* ++ * It is not safe to free this memory if trusty_revoke_memory ++ * fails. Leak it in that case. ++ */ ++ } else { ++ free_pages_exact(tctx->shared_va, tctx->shared_sz); ++ } ++ ++ /* free context */ ++ kfree(tctx); ++ return 0; ++} ++ ++static const struct of_device_id trusty_of_match[] = { ++ { ++ .compatible = "android,trusty-virtio-v1", ++ }, ++ {}, ++}; ++ ++MODULE_DEVICE_TABLE(of, trusty_of_match); ++ ++static struct platform_driver trusty_virtio_driver = { ++ .probe = trusty_virtio_probe, ++ .remove = trusty_virtio_remove, ++ .driver = { ++ .name = "trusty-virtio", ++ .of_match_table = trusty_of_match, ++ }, ++}; ++ ++module_platform_driver(trusty_virtio_driver); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("Trusty virtio driver"); ++/* ++ * TODO(b/168322325): trusty-virtio and trusty-ipc should be independent. ++ * However, trusty-virtio is not completely generic and is aware of trusty-ipc. ++ * See header includes. Particularly, trusty-virtio.ko can't be loaded before ++ * trusty-ipc.ko. 
++ */ ++MODULE_SOFTDEP("pre: trusty-ipc"); +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +new file mode 100644 +index 000000000000..265eab52aea0 +--- /dev/null ++++ b/drivers/trusty/trusty.c +@@ -0,0 +1,981 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2013 Google, Inc. ++ */ ++ ++#include <linux/delay.h> ++#include <linux/module.h> ++#include <linux/of.h> ++#include <linux/of_platform.h> ++#include <linux/platform_device.h> ++#include <linux/slab.h> ++#include <linux/stat.h> ++#include <linux/string.h> ++#include <linux/trusty/arm_ffa.h> ++#include <linux/trusty/smcall.h> ++#include <linux/trusty/sm_err.h> ++#include <linux/trusty/trusty.h> ++ ++#include <linux/scatterlist.h> ++#include <linux/dma-mapping.h> ++ ++#include "trusty-smc.h" ++ ++struct trusty_state; ++static struct platform_driver trusty_driver; ++ ++struct trusty_work { ++ struct trusty_state *ts; ++ struct work_struct work; ++}; ++ ++struct trusty_state { ++ struct mutex smc_lock; ++ struct atomic_notifier_head notifier; ++ struct completion cpu_idle_completion; ++ char *version_str; ++ u32 api_version; ++ bool trusty_panicked; ++ struct device *dev; ++ struct workqueue_struct *nop_wq; ++ struct trusty_work __percpu *nop_works; ++ struct list_head nop_queue; ++ spinlock_t nop_lock; /* protects nop_queue */ ++ struct device_dma_parameters dma_parms; ++ void *ffa_tx; ++ void *ffa_rx; ++ u16 ffa_local_id; ++ u16 ffa_remote_id; ++ struct mutex share_memory_msg_lock; /* protects share_memory_msg */ ++}; ++ ++static inline unsigned long smc(unsigned long r0, unsigned long r1, ++ unsigned long r2, unsigned long r3) ++{ ++ return trusty_smc8(r0, r1, r2, r3, 0, 0, 0, 0).r0; ++} ++ ++s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ if (WARN_ON(!s)) ++ return SM_ERR_INVALID_PARAMETERS; ++ if (WARN_ON(!SMC_IS_FASTCALL(smcnr))) ++ return 
SM_ERR_INVALID_PARAMETERS; ++ if (WARN_ON(SMC_IS_SMC64(smcnr))) ++ return SM_ERR_INVALID_PARAMETERS; ++ ++ return smc(smcnr, a0, a1, a2); ++} ++EXPORT_SYMBOL(trusty_fast_call32); ++ ++#ifdef CONFIG_64BIT ++s64 trusty_fast_call64(struct device *dev, u64 smcnr, u64 a0, u64 a1, u64 a2) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ if (WARN_ON(!s)) ++ return SM_ERR_INVALID_PARAMETERS; ++ if (WARN_ON(!SMC_IS_FASTCALL(smcnr))) ++ return SM_ERR_INVALID_PARAMETERS; ++ if (WARN_ON(!SMC_IS_SMC64(smcnr))) ++ return SM_ERR_INVALID_PARAMETERS; ++ ++ return smc(smcnr, a0, a1, a2); ++} ++EXPORT_SYMBOL(trusty_fast_call64); ++#endif ++ ++static unsigned long trusty_std_call_inner(struct device *dev, ++ unsigned long smcnr, ++ unsigned long a0, unsigned long a1, ++ unsigned long a2) ++{ ++ unsigned long ret; ++ int retry = 5; ++ ++ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx)\n", ++ __func__, smcnr, a0, a1, a2); ++ while (true) { ++ ret = smc(smcnr, a0, a1, a2); ++ while ((s32)ret == SM_ERR_FIQ_INTERRUPTED) ++ ret = smc(SMC_SC_RESTART_FIQ, 0, 0, 0); ++ if ((int)ret != SM_ERR_BUSY || !retry) ++ break; ++ ++ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, retry\n", ++ __func__, smcnr, a0, a1, a2); ++ retry--; ++ } ++ ++ return ret; ++} ++ ++static unsigned long trusty_std_call_helper(struct device *dev, ++ unsigned long smcnr, ++ unsigned long a0, unsigned long a1, ++ unsigned long a2) ++{ ++ unsigned long ret; ++ int sleep_time = 1; ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ while (true) { ++ local_irq_disable(); ++ atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_PREPARE, ++ NULL); ++ ret = trusty_std_call_inner(dev, smcnr, a0, a1, a2); ++ if (ret == SM_ERR_PANIC) { ++ s->trusty_panicked = true; ++ if (IS_ENABLED(CONFIG_TRUSTY_CRASH_IS_PANIC)) ++ panic("trusty crashed"); ++ else ++ WARN_ONCE(1, "trusty crashed"); ++ } ++ ++ atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_RETURNED, ++ NULL); 
++ if (ret == SM_ERR_INTERRUPTED) { ++ /* ++ * Make sure this cpu will eventually re-enter trusty ++ * even if the std_call resumes on another cpu. ++ */ ++ trusty_enqueue_nop(dev, NULL); ++ } ++ local_irq_enable(); ++ ++ if ((int)ret != SM_ERR_BUSY) ++ break; ++ ++ if (sleep_time == 256) ++ dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy\n", ++ __func__, smcnr, a0, a1, a2); ++ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, wait %d ms\n", ++ __func__, smcnr, a0, a1, a2, sleep_time); ++ ++ msleep(sleep_time); ++ if (sleep_time < 1000) ++ sleep_time <<= 1; ++ ++ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) retry\n", ++ __func__, smcnr, a0, a1, a2); ++ } ++ ++ if (sleep_time > 256) ++ dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) busy cleared\n", ++ __func__, smcnr, a0, a1, a2); ++ ++ return ret; ++} ++ ++static void trusty_std_call_cpu_idle(struct trusty_state *s) ++{ ++ int ret; ++ ++ ret = wait_for_completion_timeout(&s->cpu_idle_completion, HZ * 10); ++ if (!ret) { ++ dev_warn(s->dev, ++ "%s: timed out waiting for cpu idle to clear, retry anyway\n", ++ __func__); ++ } ++} ++ ++s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) ++{ ++ int ret; ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ if (WARN_ON(SMC_IS_FASTCALL(smcnr))) ++ return SM_ERR_INVALID_PARAMETERS; ++ ++ if (WARN_ON(SMC_IS_SMC64(smcnr))) ++ return SM_ERR_INVALID_PARAMETERS; ++ ++ if (s->trusty_panicked) { ++ /* ++ * Avoid calling the notifiers if trusty has panicked as they ++ * can trigger more calls. 
++ */ ++ return SM_ERR_PANIC; ++ } ++ ++ if (smcnr != SMC_SC_NOP) { ++ mutex_lock(&s->smc_lock); ++ reinit_completion(&s->cpu_idle_completion); ++ } ++ ++ dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) started\n", ++ __func__, smcnr, a0, a1, a2); ++ ++ ret = trusty_std_call_helper(dev, smcnr, a0, a1, a2); ++ while (ret == SM_ERR_INTERRUPTED || ret == SM_ERR_CPU_IDLE) { ++ dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) interrupted\n", ++ __func__, smcnr, a0, a1, a2); ++ if (ret == SM_ERR_CPU_IDLE) ++ trusty_std_call_cpu_idle(s); ++ ret = trusty_std_call_helper(dev, SMC_SC_RESTART_LAST, 0, 0, 0); ++ } ++ dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) returned 0x%x\n", ++ __func__, smcnr, a0, a1, a2, ret); ++ ++ if (smcnr == SMC_SC_NOP) ++ complete(&s->cpu_idle_completion); ++ else ++ mutex_unlock(&s->smc_lock); ++ ++ return ret; ++} ++EXPORT_SYMBOL(trusty_std_call32); ++ ++int trusty_share_memory(struct device *dev, u64 *id, ++ struct scatterlist *sglist, unsigned int nents, ++ pgprot_t pgprot) ++{ ++ return trusty_transfer_memory(dev, id, sglist, nents, pgprot, 0, ++ false); ++} ++EXPORT_SYMBOL(trusty_share_memory); ++ ++int trusty_transfer_memory(struct device *dev, u64 *id, ++ struct scatterlist *sglist, unsigned int nents, ++ pgprot_t pgprot, u64 tag, bool lend) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ int ret; ++ struct ns_mem_page_info pg_inf; ++ struct scatterlist *sg; ++ size_t count; ++ size_t i; ++ size_t len; ++ u64 ffa_handle = 0; ++ size_t total_len; ++ size_t endpoint_count = 1; ++ struct ffa_mtd *mtd = s->ffa_tx; ++ size_t comp_mrd_offset = offsetof(struct ffa_mtd, emad[endpoint_count]); ++ struct ffa_comp_mrd *comp_mrd = s->ffa_tx + comp_mrd_offset; ++ struct ffa_cons_mrd *cons_mrd = comp_mrd->address_range_array; ++ size_t cons_mrd_offset = (void *)cons_mrd - s->ffa_tx; ++ struct smc_ret8 smc_ret; ++ u32 cookie_low; ++ u32 cookie_high; ++ ++ if (WARN_ON(dev->driver != &trusty_driver.driver)) ++ return -EINVAL; ++ ++ if (WARN_ON(nents < 
1)) ++ return -EINVAL; ++ ++ if (nents != 1 && s->api_version < TRUSTY_API_VERSION_MEM_OBJ) { ++ dev_err(s->dev, "%s: old trusty version does not support non-contiguous memory objects\n", ++ __func__); ++ return -EOPNOTSUPP; ++ } ++ ++ count = dma_map_sg(dev, sglist, nents, DMA_BIDIRECTIONAL); ++ if (count != nents) { ++ dev_err(s->dev, "failed to dma map sg_table\n"); ++ return -EINVAL; ++ } ++ ++ sg = sglist; ++ ret = trusty_encode_page_info(&pg_inf, phys_to_page(sg_dma_address(sg)), ++ pgprot); ++ if (ret) { ++ dev_err(s->dev, "%s: trusty_encode_page_info failed\n", ++ __func__); ++ goto err_encode_page_info; ++ } ++ ++ if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ) { ++ *id = pg_inf.compat_attr; ++ return 0; ++ } ++ ++ len = 0; ++ for_each_sg(sglist, sg, nents, i) ++ len += sg_dma_len(sg); ++ ++ mutex_lock(&s->share_memory_msg_lock); ++ ++ mtd->sender_id = s->ffa_local_id; ++ mtd->memory_region_attributes = pg_inf.ffa_mem_attr; ++ mtd->reserved_3 = 0; ++ mtd->flags = 0; ++ mtd->handle = 0; ++ mtd->tag = tag; ++ mtd->reserved_24_27 = 0; ++ mtd->emad_count = endpoint_count; ++ for (i = 0; i < endpoint_count; i++) { ++ struct ffa_emad *emad = &mtd->emad[i]; ++ /* TODO: support stream ids */ ++ emad->mapd.endpoint_id = s->ffa_remote_id; ++ emad->mapd.memory_access_permissions = pg_inf.ffa_mem_perm; ++ emad->mapd.flags = 0; ++ emad->comp_mrd_offset = comp_mrd_offset; ++ emad->reserved_8_15 = 0; ++ } ++ comp_mrd->total_page_count = len / PAGE_SIZE; ++ comp_mrd->address_range_count = nents; ++ comp_mrd->reserved_8_15 = 0; ++ ++ total_len = cons_mrd_offset + nents * sizeof(*cons_mrd); ++ sg = sglist; ++ while (count) { ++ size_t lcount = ++ min_t(size_t, count, (PAGE_SIZE - cons_mrd_offset) / ++ sizeof(*cons_mrd)); ++ size_t fragment_len = lcount * sizeof(*cons_mrd) + ++ cons_mrd_offset; ++ ++ for (i = 0; i < lcount; i++) { ++ cons_mrd[i].address = sg_dma_address(sg); ++ cons_mrd[i].page_count = sg_dma_len(sg) / PAGE_SIZE; ++ cons_mrd[i].reserved_12_15 = 0; ++ sg = 
sg_next(sg); ++ } ++ count -= lcount; ++ if (cons_mrd_offset) { ++ u32 smc = lend ? SMC_FC_FFA_MEM_LEND : ++ SMC_FC_FFA_MEM_SHARE; ++ /* First fragment */ ++ smc_ret = trusty_smc8(smc, total_len, ++ fragment_len, 0, 0, 0, 0, 0); ++ } else { ++ smc_ret = trusty_smc8(SMC_FC_FFA_MEM_FRAG_TX, ++ cookie_low, cookie_high, ++ fragment_len, 0, 0, 0, 0); ++ } ++ if (smc_ret.r0 == SMC_FC_FFA_MEM_FRAG_RX) { ++ cookie_low = smc_ret.r1; ++ cookie_high = smc_ret.r2; ++ dev_dbg(s->dev, "cookie %x %x", cookie_low, ++ cookie_high); ++ if (!count) { ++ /* ++ * We have sent all our descriptors. Expected ++ * SMC_FC_FFA_SUCCESS, not a request to send ++ * another fragment. ++ */ ++ dev_err(s->dev, "%s: fragment_len %zd/%zd, unexpected SMC_FC_FFA_MEM_FRAG_RX\n", ++ __func__, fragment_len, total_len); ++ ret = -EIO; ++ break; ++ } ++ } else if (smc_ret.r0 == SMC_FC_FFA_SUCCESS) { ++ ffa_handle = smc_ret.r2 | (u64)smc_ret.r3 << 32; ++ dev_dbg(s->dev, "%s: fragment_len %zu/%zu, got handle 0x%llx\n", ++ __func__, fragment_len, total_len, ++ ffa_handle); ++ if (count) { ++ /* ++ * We have not sent all our descriptors. ++ * Expected SMC_FC_FFA_MEM_FRAG_RX not ++ * SMC_FC_FFA_SUCCESS. 
++ */ ++ dev_err(s->dev, "%s: fragment_len %zu/%zu, unexpected SMC_FC_FFA_SUCCESS, count %zu != 0\n", ++ __func__, fragment_len, total_len, ++ count); ++ ret = -EIO; ++ break; ++ } ++ } else { ++ dev_err(s->dev, "%s: fragment_len %zu/%zu, SMC_FC_FFA_MEM_SHARE failed 0x%lx 0x%lx 0x%lx", ++ __func__, fragment_len, total_len, ++ smc_ret.r0, smc_ret.r1, smc_ret.r2); ++ ret = -EIO; ++ break; ++ } ++ ++ cons_mrd = s->ffa_tx; ++ cons_mrd_offset = 0; ++ } ++ ++ mutex_unlock(&s->share_memory_msg_lock); ++ ++ if (!ret) { ++ *id = ffa_handle; ++ dev_dbg(s->dev, "%s: done\n", __func__); ++ return 0; ++ } ++ ++ dev_err(s->dev, "%s: failed %d", __func__, ret); ++ ++err_encode_page_info: ++ dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL); ++ return ret; ++} ++EXPORT_SYMBOL(trusty_transfer_memory); ++ ++/* ++ * trusty_share_memory_compat - trusty_share_memory wrapper for old apis ++ * ++ * Call trusty_share_memory and filter out memory attributes if trusty version ++ * is old. Used by clients that used to pass just a physical address to trusty ++ * instead of a physical address plus memory attributes value. 
++ */ ++int trusty_share_memory_compat(struct device *dev, u64 *id, ++ struct scatterlist *sglist, unsigned int nents, ++ pgprot_t pgprot) ++{ ++ int ret; ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ ret = trusty_share_memory(dev, id, sglist, nents, pgprot); ++ if (!ret && s->api_version < TRUSTY_API_VERSION_PHYS_MEM_OBJ) ++ *id &= 0x0000FFFFFFFFF000ull; ++ ++ return ret; ++} ++EXPORT_SYMBOL(trusty_share_memory_compat); ++ ++int trusty_reclaim_memory(struct device *dev, u64 id, ++ struct scatterlist *sglist, unsigned int nents) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ int ret = 0; ++ struct smc_ret8 smc_ret; ++ ++ if (WARN_ON(dev->driver != &trusty_driver.driver)) ++ return -EINVAL; ++ ++ if (WARN_ON(nents < 1)) ++ return -EINVAL; ++ ++ if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ) { ++ if (nents != 1) { ++ dev_err(s->dev, "%s: not supported\n", __func__); ++ return -EOPNOTSUPP; ++ } ++ ++ dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL); ++ ++ dev_dbg(s->dev, "%s: done\n", __func__); ++ return 0; ++ } ++ ++ mutex_lock(&s->share_memory_msg_lock); ++ ++ smc_ret = trusty_smc8(SMC_FC_FFA_MEM_RECLAIM, (u32)id, id >> 32, 0, 0, ++ 0, 0, 0); ++ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) { ++ dev_err(s->dev, "%s: SMC_FC_FFA_MEM_RECLAIM failed 0x%lx 0x%lx 0x%lx", ++ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2); ++ if (smc_ret.r0 == SMC_FC_FFA_ERROR && ++ smc_ret.r2 == FFA_ERROR_DENIED) ++ ret = -EBUSY; ++ else ++ ret = -EIO; ++ } ++ ++ mutex_unlock(&s->share_memory_msg_lock); ++ ++ if (ret != 0) ++ return ret; ++ ++ dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL); ++ ++ dev_dbg(s->dev, "%s: done\n", __func__); ++ return 0; ++} ++EXPORT_SYMBOL(trusty_reclaim_memory); ++ ++int trusty_call_notifier_register(struct device *dev, struct notifier_block *n) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ return atomic_notifier_chain_register(&s->notifier, n); ++} 
++EXPORT_SYMBOL(trusty_call_notifier_register); ++ ++int trusty_call_notifier_unregister(struct device *dev, ++ struct notifier_block *n) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ return atomic_notifier_chain_unregister(&s->notifier, n); ++} ++EXPORT_SYMBOL(trusty_call_notifier_unregister); ++ ++static int trusty_remove_child(struct device *dev, void *data) ++{ ++ platform_device_unregister(to_platform_device(dev)); ++ return 0; ++} ++ ++static ssize_t trusty_version_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ return scnprintf(buf, PAGE_SIZE, "%s\n", s->version_str ?: "unknown"); ++} ++ ++static DEVICE_ATTR(trusty_version, 0400, trusty_version_show, NULL); ++ ++static struct attribute *trusty_attrs[] = { ++ &dev_attr_trusty_version.attr, ++ NULL, ++}; ++ATTRIBUTE_GROUPS(trusty); ++ ++const char *trusty_version_str_get(struct device *dev) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ return s->version_str; ++} ++EXPORT_SYMBOL(trusty_version_str_get); ++ ++static int trusty_init_msg_buf(struct trusty_state *s, struct device *dev) ++{ ++ phys_addr_t tx_paddr; ++ phys_addr_t rx_paddr; ++ int ret; ++ struct smc_ret8 smc_ret; ++ ++ if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ) ++ return 0; ++ ++ /* Get supported FF-A version and check if it is compatible */ ++ smc_ret = trusty_smc8(SMC_FC_FFA_VERSION, FFA_CURRENT_VERSION, 0, 0, ++ 0, 0, 0, 0); ++ if (FFA_VERSION_TO_MAJOR(smc_ret.r0) != FFA_CURRENT_VERSION_MAJOR) { ++ dev_err(s->dev, ++ "%s: Unsupported FF-A version 0x%lx, expected 0x%x\n", ++ __func__, smc_ret.r0, FFA_CURRENT_VERSION); ++ ret = -EIO; ++ goto err_version; ++ } ++ ++ /* Check that SMC_FC_FFA_MEM_SHARE is implemented */ ++ smc_ret = trusty_smc8(SMC_FC_FFA_FEATURES, SMC_FC_FFA_MEM_SHARE, 0, 0, ++ 0, 0, 0, 0); ++ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) { ++ 
dev_err(s->dev, ++ "%s: SMC_FC_FFA_FEATURES(SMC_FC_FFA_MEM_SHARE) failed 0x%lx 0x%lx 0x%lx\n", ++ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2); ++ ret = -EIO; ++ goto err_features; ++ } ++ ++ /* ++ * Set FF-A endpoint IDs. ++ * ++ * Hardcode 0x8000 for the secure os. ++ * TODO: Use FF-A call or device tree to configure this dynamically ++ */ ++ smc_ret = trusty_smc8(SMC_FC_FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0); ++ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) { ++ dev_err(s->dev, ++ "%s: SMC_FC_FFA_ID_GET failed 0x%lx 0x%lx 0x%lx\n", ++ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2); ++ ret = -EIO; ++ goto err_id_get; ++ } ++ ++ s->ffa_local_id = smc_ret.r2; ++ s->ffa_remote_id = 0x8000; ++ ++ s->ffa_tx = kmalloc(PAGE_SIZE, GFP_KERNEL); ++ if (!s->ffa_tx) { ++ ret = -ENOMEM; ++ goto err_alloc_tx; ++ } ++ tx_paddr = virt_to_phys(s->ffa_tx); ++ if (WARN_ON(tx_paddr & (PAGE_SIZE - 1))) { ++ ret = -EINVAL; ++ goto err_unaligned_tx_buf; ++ } ++ ++ s->ffa_rx = kmalloc(PAGE_SIZE, GFP_KERNEL); ++ if (!s->ffa_rx) { ++ ret = -ENOMEM; ++ goto err_alloc_rx; ++ } ++ rx_paddr = virt_to_phys(s->ffa_rx); ++ if (WARN_ON(rx_paddr & (PAGE_SIZE - 1))) { ++ ret = -EINVAL; ++ goto err_unaligned_rx_buf; ++ } ++ ++ smc_ret = trusty_smc8(SMC_FCZ_FFA_RXTX_MAP, tx_paddr, rx_paddr, 1, 0, ++ 0, 0, 0); ++ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) { ++ dev_err(s->dev, "%s: SMC_FCZ_FFA_RXTX_MAP failed 0x%lx 0x%lx 0x%lx\n", ++ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2); ++ ret = -EIO; ++ goto err_rxtx_map; ++ } ++ ++ return 0; ++ ++err_rxtx_map: ++err_unaligned_rx_buf: ++ kfree(s->ffa_rx); ++ s->ffa_rx = NULL; ++err_alloc_rx: ++err_unaligned_tx_buf: ++ kfree(s->ffa_tx); ++ s->ffa_tx = NULL; ++err_alloc_tx: ++err_id_get: ++err_features: ++err_version: ++ return ret; ++} ++ ++static void trusty_free_msg_buf(struct trusty_state *s, struct device *dev) ++{ ++ struct smc_ret8 smc_ret; ++ ++ smc_ret = trusty_smc8(SMC_FC_FFA_RXTX_UNMAP, 0, 0, 0, 0, 0, 0, 0); ++ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) { ++ 
dev_err(s->dev, "%s: SMC_FC_FFA_RXTX_UNMAP failed 0x%lx 0x%lx 0x%lx\n", ++ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2); ++ } else { ++ kfree(s->ffa_rx); ++ kfree(s->ffa_tx); ++ } ++} ++ ++static void trusty_init_version(struct trusty_state *s, struct device *dev) ++{ ++ int ret; ++ int i; ++ int version_str_len; ++ ++ ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, -1, 0, 0); ++ if (ret <= 0) ++ goto err_get_size; ++ ++ version_str_len = ret; ++ ++ s->version_str = kmalloc(version_str_len + 1, GFP_KERNEL); ++ for (i = 0; i < version_str_len; i++) { ++ ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, i, 0, 0); ++ if (ret < 0) ++ goto err_get_char; ++ s->version_str[i] = ret; ++ } ++ s->version_str[i] = '\0'; ++ ++ dev_info(dev, "trusty version: %s\n", s->version_str); ++ return; ++ ++err_get_char: ++ kfree(s->version_str); ++ s->version_str = NULL; ++err_get_size: ++ dev_err(dev, "failed to get version: %d\n", ret); ++} ++ ++u32 trusty_get_api_version(struct device *dev) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ return s->api_version; ++} ++EXPORT_SYMBOL(trusty_get_api_version); ++ ++bool trusty_get_panic_status(struct device *dev) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ if (WARN_ON(dev->driver != &trusty_driver.driver)) ++ return false; ++ return s->trusty_panicked; ++} ++EXPORT_SYMBOL(trusty_get_panic_status); ++ ++static int trusty_init_api_version(struct trusty_state *s, struct device *dev) ++{ ++ u32 api_version; ++ ++ api_version = trusty_fast_call32(dev, SMC_FC_API_VERSION, ++ TRUSTY_API_VERSION_CURRENT, 0, 0); ++ if (api_version == SM_ERR_UNDEFINED_SMC) ++ api_version = 0; ++ ++ if (api_version > TRUSTY_API_VERSION_CURRENT) { ++ dev_err(dev, "unsupported api version %u > %u\n", ++ api_version, TRUSTY_API_VERSION_CURRENT); ++ return -EINVAL; ++ } ++ ++ dev_info(dev, "selected api version: %u (requested %u)\n", ++ api_version, TRUSTY_API_VERSION_CURRENT); ++ 
s->api_version = api_version; ++ ++ return 0; ++} ++ ++static bool dequeue_nop(struct trusty_state *s, u32 *args) ++{ ++ unsigned long flags; ++ struct trusty_nop *nop = NULL; ++ ++ spin_lock_irqsave(&s->nop_lock, flags); ++ if (!list_empty(&s->nop_queue)) { ++ nop = list_first_entry(&s->nop_queue, ++ struct trusty_nop, node); ++ list_del_init(&nop->node); ++ args[0] = nop->args[0]; ++ args[1] = nop->args[1]; ++ args[2] = nop->args[2]; ++ } else { ++ args[0] = 0; ++ args[1] = 0; ++ args[2] = 0; ++ } ++ spin_unlock_irqrestore(&s->nop_lock, flags); ++ return nop; ++} ++ ++static void locked_nop_work_func(struct work_struct *work) ++{ ++ int ret; ++ struct trusty_work *tw = container_of(work, struct trusty_work, work); ++ struct trusty_state *s = tw->ts; ++ ++ ret = trusty_std_call32(s->dev, SMC_SC_LOCKED_NOP, 0, 0, 0); ++ if (ret != 0) ++ dev_err(s->dev, "%s: SMC_SC_LOCKED_NOP failed %d", ++ __func__, ret); ++ ++ dev_dbg(s->dev, "%s: done\n", __func__); ++} ++ ++static void nop_work_func(struct work_struct *work) ++{ ++ int ret; ++ bool next; ++ u32 args[3]; ++ u32 last_arg0; ++ struct trusty_work *tw = container_of(work, struct trusty_work, work); ++ struct trusty_state *s = tw->ts; ++ ++ dequeue_nop(s, args); ++ do { ++ dev_dbg(s->dev, "%s: %x %x %x\n", ++ __func__, args[0], args[1], args[2]); ++ ++ last_arg0 = args[0]; ++ ret = trusty_std_call32(s->dev, SMC_SC_NOP, ++ args[0], args[1], args[2]); ++ ++ next = dequeue_nop(s, args); ++ ++ if (ret == SM_ERR_NOP_INTERRUPTED) { ++ next = true; ++ } else if (ret != SM_ERR_NOP_DONE) { ++ dev_err(s->dev, "%s: SMC_SC_NOP %x failed %d", ++ __func__, last_arg0, ret); ++ if (last_arg0) { ++ /* ++ * Don't break out of the loop if a non-default ++ * nop-handler returns an error. 
++ */ ++ next = true; ++ } ++ } ++ } while (next); ++ ++ dev_dbg(s->dev, "%s: done\n", __func__); ++} ++ ++void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop) ++{ ++ unsigned long flags; ++ struct trusty_work *tw; ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ preempt_disable(); ++ tw = this_cpu_ptr(s->nop_works); ++ if (nop) { ++ WARN_ON(s->api_version < TRUSTY_API_VERSION_SMP_NOP); ++ ++ spin_lock_irqsave(&s->nop_lock, flags); ++ if (list_empty(&nop->node)) ++ list_add_tail(&nop->node, &s->nop_queue); ++ spin_unlock_irqrestore(&s->nop_lock, flags); ++ } ++ queue_work(s->nop_wq, &tw->work); ++ preempt_enable(); ++} ++EXPORT_SYMBOL(trusty_enqueue_nop); ++ ++void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop) ++{ ++ unsigned long flags; ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ if (WARN_ON(!nop)) ++ return; ++ ++ spin_lock_irqsave(&s->nop_lock, flags); ++ if (!list_empty(&nop->node)) ++ list_del_init(&nop->node); ++ spin_unlock_irqrestore(&s->nop_lock, flags); ++} ++EXPORT_SYMBOL(trusty_dequeue_nop); ++ ++static int trusty_probe(struct platform_device *pdev) ++{ ++ int ret; ++ unsigned int cpu; ++ work_func_t work_func; ++ struct trusty_state *s; ++ struct device_node *node = pdev->dev.of_node; ++ ++ if (!node) { ++ dev_err(&pdev->dev, "of_node required\n"); ++ return -EINVAL; ++ } ++ ++ s = kzalloc(sizeof(*s), GFP_KERNEL); ++ if (!s) { ++ ret = -ENOMEM; ++ goto err_allocate_state; ++ } ++ ++ s->dev = &pdev->dev; ++ spin_lock_init(&s->nop_lock); ++ INIT_LIST_HEAD(&s->nop_queue); ++ mutex_init(&s->smc_lock); ++ mutex_init(&s->share_memory_msg_lock); ++ ATOMIC_INIT_NOTIFIER_HEAD(&s->notifier); ++ init_completion(&s->cpu_idle_completion); ++ ++ s->dev->dma_parms = &s->dma_parms; ++ dma_set_max_seg_size(s->dev, 0xfffff000); /* dma_parms limit */ ++ /* ++ * Set dma mask to 48 bits. This is the current limit of ++ * trusty_encode_page_info. 
++ */ ++ dma_coerce_mask_and_coherent(s->dev, DMA_BIT_MASK(48)); ++ ++ platform_set_drvdata(pdev, s); ++ ++ trusty_init_version(s, &pdev->dev); ++ ++ ret = trusty_init_api_version(s, &pdev->dev); ++ if (ret < 0) ++ goto err_api_version; ++ ++ ret = trusty_init_msg_buf(s, &pdev->dev); ++ if (ret < 0) ++ goto err_init_msg_buf; ++ ++ s->nop_wq = alloc_workqueue("trusty-nop-wq", WQ_CPU_INTENSIVE, 0); ++ if (!s->nop_wq) { ++ ret = -ENODEV; ++ dev_err(&pdev->dev, "Failed create trusty-nop-wq\n"); ++ goto err_create_nop_wq; ++ } ++ ++ s->nop_works = alloc_percpu(struct trusty_work); ++ if (!s->nop_works) { ++ ret = -ENOMEM; ++ dev_err(&pdev->dev, "Failed to allocate works\n"); ++ goto err_alloc_works; ++ } ++ ++ if (s->api_version < TRUSTY_API_VERSION_SMP) ++ work_func = locked_nop_work_func; ++ else ++ work_func = nop_work_func; ++ ++ for_each_possible_cpu(cpu) { ++ struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu); ++ ++ tw->ts = s; ++ INIT_WORK(&tw->work, work_func); ++ } ++ ++ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "Failed to add children: %d\n", ret); ++ goto err_add_children; ++ } ++ ++ return 0; ++ ++err_add_children: ++ for_each_possible_cpu(cpu) { ++ struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu); ++ ++ flush_work(&tw->work); ++ } ++ free_percpu(s->nop_works); ++err_alloc_works: ++ destroy_workqueue(s->nop_wq); ++err_create_nop_wq: ++ trusty_free_msg_buf(s, &pdev->dev); ++err_init_msg_buf: ++err_api_version: ++ s->dev->dma_parms = NULL; ++ kfree(s->version_str); ++ device_for_each_child(&pdev->dev, NULL, trusty_remove_child); ++ mutex_destroy(&s->share_memory_msg_lock); ++ mutex_destroy(&s->smc_lock); ++ kfree(s); ++err_allocate_state: ++ return ret; ++} ++ ++static int trusty_remove(struct platform_device *pdev) ++{ ++ unsigned int cpu; ++ struct trusty_state *s = platform_get_drvdata(pdev); ++ ++ device_for_each_child(&pdev->dev, NULL, trusty_remove_child); ++ ++ 
for_each_possible_cpu(cpu) { ++ struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu); ++ ++ flush_work(&tw->work); ++ } ++ free_percpu(s->nop_works); ++ destroy_workqueue(s->nop_wq); ++ ++ mutex_destroy(&s->share_memory_msg_lock); ++ mutex_destroy(&s->smc_lock); ++ trusty_free_msg_buf(s, &pdev->dev); ++ s->dev->dma_parms = NULL; ++ kfree(s->version_str); ++ kfree(s); ++ return 0; ++} ++ ++static const struct of_device_id trusty_of_match[] = { ++ { .compatible = "android,trusty-smc-v1", }, ++ {}, ++}; ++ ++MODULE_DEVICE_TABLE(trusty, trusty_of_match); ++ ++static struct platform_driver trusty_driver = { ++ .probe = trusty_probe, ++ .remove = trusty_remove, ++ .driver = { ++ .name = "trusty", ++ .of_match_table = trusty_of_match, ++ .dev_groups = trusty_groups, ++ }, ++}; ++ ++static int __init trusty_driver_init(void) ++{ ++ return platform_driver_register(&trusty_driver); ++} ++ ++static void __exit trusty_driver_exit(void) ++{ ++ platform_driver_unregister(&trusty_driver); ++} ++ ++subsys_initcall(trusty_driver_init); ++module_exit(trusty_driver_exit); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("Trusty core driver"); +diff --git a/include/linux/trusty/arm_ffa.h b/include/linux/trusty/arm_ffa.h +new file mode 100644 +index 000000000000..ab7b2afb794c +--- /dev/null ++++ b/include/linux/trusty/arm_ffa.h +@@ -0,0 +1,590 @@ ++/* SPDX-License-Identifier: MIT */ ++/* ++ * Copyright (C) 2020 Google, Inc. ++ * ++ * Trusty and TF-A also have a copy of this header. ++ * Please keep the copies in sync. ++ */ ++#ifndef __LINUX_TRUSTY_ARM_FFA_H ++#define __LINUX_TRUSTY_ARM_FFA_H ++ ++/* ++ * Subset of Arm PSA Firmware Framework for Arm v8-A 1.0 EAC 1_0 ++ * (https://developer.arm.com/docs/den0077/a) needed for shared memory. 
++ */ ++ ++#include "smcall.h" ++ ++#ifndef STATIC_ASSERT ++#define STATIC_ASSERT(e) _Static_assert(e, #e) ++#endif ++ ++#define FFA_CURRENT_VERSION_MAJOR (1U) ++#define FFA_CURRENT_VERSION_MINOR (0U) ++ ++#define FFA_VERSION_TO_MAJOR(version) ((version) >> 16) ++#define FFA_VERSION_TO_MINOR(version) ((version) & (0xffff)) ++#define FFA_VERSION(major, minor) (((major) << 16) | (minor)) ++#define FFA_CURRENT_VERSION \ ++ FFA_VERSION(FFA_CURRENT_VERSION_MAJOR, FFA_CURRENT_VERSION_MINOR) ++ ++#define SMC_ENTITY_SHARED_MEMORY 4 ++ ++#define SMC_FASTCALL_NR_SHARED_MEMORY(nr) \ ++ SMC_FASTCALL_NR(SMC_ENTITY_SHARED_MEMORY, nr) ++#define SMC_FASTCALL64_NR_SHARED_MEMORY(nr) \ ++ SMC_FASTCALL64_NR(SMC_ENTITY_SHARED_MEMORY, nr) ++ ++/** ++ * typedef ffa_endpoint_id16_t - Endpoint ID ++ * ++ * Current implementation only supports VMIDs. FFA spec also supports stream ++ * endpoint ids. ++ */ ++typedef uint16_t ffa_endpoint_id16_t; ++ ++/** ++ * struct ffa_cons_mrd - Constituent memory region descriptor ++ * @address: ++ * Start address of contiguous memory region. Must be 4K page aligned. ++ * @page_count: ++ * Number of 4K pages in region. ++ * @reserved_12_15: ++ * Reserve bytes 12-15 to pad struct size to 16 bytes. ++ */ ++struct ffa_cons_mrd { ++ uint64_t address; ++ uint32_t page_count; ++ uint32_t reserved_12_15; ++}; ++STATIC_ASSERT(sizeof(struct ffa_cons_mrd) == 16); ++ ++/** ++ * struct ffa_comp_mrd - Composite memory region descriptor ++ * @total_page_count: ++ * Number of 4k pages in memory region. Must match sum of ++ * @address_range_array[].page_count. ++ * @address_range_count: ++ * Number of entries in @address_range_array. ++ * @reserved_8_15: ++ * Reserve bytes 8-15 to pad struct size to 16 byte alignment and ++ * make @address_range_array 16 byte aligned. ++ * @address_range_array: ++ * Array of &struct ffa_cons_mrd entries. 
++ */ ++struct ffa_comp_mrd { ++ uint32_t total_page_count; ++ uint32_t address_range_count; ++ uint64_t reserved_8_15; ++ struct ffa_cons_mrd address_range_array[]; ++}; ++STATIC_ASSERT(sizeof(struct ffa_comp_mrd) == 16); ++ ++/** ++ * typedef ffa_mem_attr8_t - Memory region attributes ++ * ++ * * @FFA_MEM_ATTR_DEVICE_NGNRNE: ++ * Device-nGnRnE. ++ * * @FFA_MEM_ATTR_DEVICE_NGNRE: ++ * Device-nGnRE. ++ * * @FFA_MEM_ATTR_DEVICE_NGRE: ++ * Device-nGRE. ++ * * @FFA_MEM_ATTR_DEVICE_GRE: ++ * Device-GRE. ++ * * @FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED ++ * Normal memory. Non-cacheable. ++ * * @FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB ++ * Normal memory. Write-back cached. ++ * * @FFA_MEM_ATTR_NON_SHAREABLE ++ * Non-shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*. ++ * * @FFA_MEM_ATTR_OUTER_SHAREABLE ++ * Outer Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*. ++ * * @FFA_MEM_ATTR_INNER_SHAREABLE ++ * Inner Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*. ++ */ ++typedef uint8_t ffa_mem_attr8_t; ++#define FFA_MEM_ATTR_DEVICE_NGNRNE ((1U << 4) | (0x0U << 2)) ++#define FFA_MEM_ATTR_DEVICE_NGNRE ((1U << 4) | (0x1U << 2)) ++#define FFA_MEM_ATTR_DEVICE_NGRE ((1U << 4) | (0x2U << 2)) ++#define FFA_MEM_ATTR_DEVICE_GRE ((1U << 4) | (0x3U << 2)) ++#define FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED ((2U << 4) | (0x1U << 2)) ++#define FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB ((2U << 4) | (0x3U << 2)) ++#define FFA_MEM_ATTR_NON_SHAREABLE (0x0U << 0) ++#define FFA_MEM_ATTR_OUTER_SHAREABLE (0x2U << 0) ++#define FFA_MEM_ATTR_INNER_SHAREABLE (0x3U << 0) ++ ++/** ++ * typedef ffa_mem_perm8_t - Memory access permissions ++ * ++ * * @FFA_MEM_ATTR_RO ++ * Request or specify read-only mapping. ++ * * @FFA_MEM_ATTR_RW ++ * Request or allow read-write mapping. ++ * * @FFA_MEM_PERM_NX ++ * Deny executable mapping. ++ * * @FFA_MEM_PERM_X ++ * Request executable mapping. 
++ */ ++typedef uint8_t ffa_mem_perm8_t; ++#define FFA_MEM_PERM_RO (1U << 0) ++#define FFA_MEM_PERM_RW (1U << 1) ++#define FFA_MEM_PERM_NX (1U << 2) ++#define FFA_MEM_PERM_X (1U << 3) ++ ++/** ++ * typedef ffa_mem_flag8_t - Endpoint memory flags ++ * ++ * * @FFA_MEM_FLAG_OTHER ++ * Other borrower. Memory region must not be or was not retrieved on behalf ++ * of this endpoint. ++ */ ++typedef uint8_t ffa_mem_flag8_t; ++#define FFA_MEM_FLAG_OTHER (1U << 0) ++ ++/** ++ * typedef ffa_mtd_flag32_t - Memory transaction descriptor flags ++ * ++ * * @FFA_MTD_FLAG_ZERO_MEMORY ++ * Zero memory after unmapping from sender (must be 0 for share). ++ * * @FFA_MTD_FLAG_TIME_SLICING ++ * Not supported by this implementation. ++ * * @FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH ++ * Zero memory after unmapping from borrowers (must be 0 for share). ++ * * @FFA_MTD_FLAG_TYPE_MASK ++ * Bit-mask to extract memory management transaction type from flags. ++ * * @FFA_MTD_FLAG_TYPE_SHARE_MEMORY ++ * Share memory transaction flag. ++ * Used by @SMC_FC_FFA_MEM_RETRIEVE_RESP to indicate that memory came from ++ * @SMC_FC_FFA_MEM_SHARE and by @SMC_FC_FFA_MEM_RETRIEVE_REQ to specify that ++ * it must have. ++ * * @FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK ++ * Not supported by this implementation. ++ */ ++typedef uint32_t ffa_mtd_flag32_t; ++#define FFA_MTD_FLAG_ZERO_MEMORY (1U << 0) ++#define FFA_MTD_FLAG_TIME_SLICING (1U << 1) ++#define FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH (1U << 2) ++#define FFA_MTD_FLAG_TYPE_MASK (3U << 3) ++#define FFA_MTD_FLAG_TYPE_SHARE_MEMORY (1U << 3) ++#define FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK (0x1FU << 5) ++ ++/** ++ * struct ffa_mapd - Memory access permissions descriptor ++ * @endpoint_id: ++ * Endpoint id that @memory_access_permissions and @flags apply to. ++ * (&typedef ffa_endpoint_id16_t). ++ * @memory_access_permissions: ++ * FFA_MEM_PERM_* values or'ed together (&typedef ffa_mem_perm8_t). 
++ * @flags: ++ * FFA_MEM_FLAG_* values or'ed together (&typedef ffa_mem_flag8_t). ++ */ ++struct ffa_mapd { ++ ffa_endpoint_id16_t endpoint_id; ++ ffa_mem_perm8_t memory_access_permissions; ++ ffa_mem_flag8_t flags; ++}; ++STATIC_ASSERT(sizeof(struct ffa_mapd) == 4); ++ ++/** ++ * struct ffa_emad - Endpoint memory access descriptor. ++ * @mapd: &struct ffa_mapd. ++ * @comp_mrd_offset: ++ * Offset of &struct ffa_comp_mrd form start of &struct ffa_mtd. ++ * @reserved_8_15: ++ * Reserved bytes 8-15. Must be 0. ++ */ ++struct ffa_emad { ++ struct ffa_mapd mapd; ++ uint32_t comp_mrd_offset; ++ uint64_t reserved_8_15; ++}; ++STATIC_ASSERT(sizeof(struct ffa_emad) == 16); ++ ++/** ++ * struct ffa_mtd - Memory transaction descriptor. ++ * @sender_id: ++ * Sender endpoint id. ++ * @memory_region_attributes: ++ * FFA_MEM_ATTR_* values or'ed together (&typedef ffa_mem_attr8_t). ++ * @reserved_3: ++ * Reserved bytes 3. Must be 0. ++ * @flags: ++ * FFA_MTD_FLAG_* values or'ed together (&typedef ffa_mtd_flag32_t). ++ * @handle: ++ * Id of shared memory object. Most be 0 for MEM_SHARE. ++ * @tag: Client allocated tag. Must match original value. ++ * @reserved_24_27: ++ * Reserved bytes 24-27. Must be 0. ++ * @emad_count: ++ * Number of entries in @emad. Must be 1 in current implementation. ++ * FFA spec allows more entries. ++ * @emad: ++ * Endpoint memory access descriptor array (see @struct ffa_emad). ++ */ ++struct ffa_mtd { ++ ffa_endpoint_id16_t sender_id; ++ ffa_mem_attr8_t memory_region_attributes; ++ uint8_t reserved_3; ++ ffa_mtd_flag32_t flags; ++ uint64_t handle; ++ uint64_t tag; ++ uint32_t reserved_24_27; ++ uint32_t emad_count; ++ struct ffa_emad emad[]; ++}; ++STATIC_ASSERT(sizeof(struct ffa_mtd) == 32); ++ ++/** ++ * struct ffa_mem_relinquish_descriptor - Relinquish request descriptor. ++ * @handle: ++ * Id of shared memory object to relinquish. ++ * @flags: ++ * If bit 0 is set clear memory after unmapping from borrower. Must be 0 ++ * for share. 
Bit[1]: Time slicing. Not supported, must be 0. All other ++ * bits are reserved 0. ++ * @endpoint_count: ++ * Number of entries in @endpoint_array. ++ * @endpoint_array: ++ * Array of endpoint ids. ++ */ ++struct ffa_mem_relinquish_descriptor { ++ uint64_t handle; ++ uint32_t flags; ++ uint32_t endpoint_count; ++ ffa_endpoint_id16_t endpoint_array[]; ++}; ++STATIC_ASSERT(sizeof(struct ffa_mem_relinquish_descriptor) == 16); ++ ++/** ++ * enum ffa_error - FF-A error code ++ * @FFA_ERROR_NOT_SUPPORTED: ++ * Operation contained possibly valid parameters not supported by the ++ * current implementation. Does not match FF-A 1.0 EAC 1_0 definition. ++ * @FFA_ERROR_INVALID_PARAMETERS: ++ * Invalid parameters. Conditions function specific. ++ * @FFA_ERROR_NO_MEMORY: ++ * Not enough memory. ++ * @FFA_ERROR_DENIED: ++ * Operation not allowed. Conditions function specific. ++ * ++ * FF-A 1.0 EAC 1_0 defines other error codes as well but the current ++ * implementation does not use them. ++ */ ++enum ffa_error { ++ FFA_ERROR_NOT_SUPPORTED = -1, ++ FFA_ERROR_INVALID_PARAMETERS = -2, ++ FFA_ERROR_NO_MEMORY = -3, ++ FFA_ERROR_DENIED = -6, ++}; ++ ++/** ++ * SMC_FC32_FFA_MIN - First 32 bit SMC opcode reserved for FFA ++ */ ++#define SMC_FC32_FFA_MIN SMC_FASTCALL_NR_SHARED_MEMORY(0x60) ++ ++/** ++ * SMC_FC32_FFA_MAX - Last 32 bit SMC opcode reserved for FFA ++ */ ++#define SMC_FC32_FFA_MAX SMC_FASTCALL_NR_SHARED_MEMORY(0x7F) ++ ++/** ++ * SMC_FC64_FFA_MIN - First 64 bit SMC opcode reserved for FFA ++ */ ++#define SMC_FC64_FFA_MIN SMC_FASTCALL64_NR_SHARED_MEMORY(0x60) ++ ++/** ++ * SMC_FC64_FFA_MAX - Last 64 bit SMC opcode reserved for FFA ++ */ ++#define SMC_FC64_FFA_MAX SMC_FASTCALL64_NR_SHARED_MEMORY(0x7F) ++ ++/** ++ * SMC_FC_FFA_ERROR - SMC error return opcode ++ * ++ * Register arguments: ++ * ++ * * w1: VMID in [31:16], vCPU in [15:0] ++ * * w2: Error code (&enum ffa_error) ++ */ ++#define SMC_FC_FFA_ERROR SMC_FASTCALL_NR_SHARED_MEMORY(0x60) ++ ++/** ++ * SMC_FC_FFA_SUCCESS - 
32 bit SMC success return opcode ++ * ++ * Register arguments: ++ * ++ * * w1: VMID in [31:16], vCPU in [15:0] ++ * * w2-w7: Function specific ++ */ ++#define SMC_FC_FFA_SUCCESS SMC_FASTCALL_NR_SHARED_MEMORY(0x61) ++ ++/** ++ * SMC_FC64_FFA_SUCCESS - 64 bit SMC success return opcode ++ * ++ * Register arguments: ++ * ++ * * w1: VMID in [31:16], vCPU in [15:0] ++ * * w2/x2-w7/x7: Function specific ++ */ ++#define SMC_FC64_FFA_SUCCESS SMC_FASTCALL64_NR_SHARED_MEMORY(0x61) ++ ++/** ++ * SMC_FC_FFA_VERSION - SMC opcode to return supported FF-A version ++ * ++ * Register arguments: ++ * ++ * * w1: Major version bit[30:16] and minor version in bit[15:0] supported ++ * by caller. Bit[31] must be 0. ++ * ++ * Return: ++ * * w0: &SMC_FC_FFA_SUCCESS ++ * * w2: Major version bit[30:16], minor version in bit[15:0], bit[31] must ++ * be 0. ++ * ++ * or ++ * ++ * * w0: SMC_FC_FFA_ERROR ++ * * w2: FFA_ERROR_NOT_SUPPORTED if major version passed in is less than the ++ * minimum major version supported. ++ */ ++#define SMC_FC_FFA_VERSION SMC_FASTCALL_NR_SHARED_MEMORY(0x63) ++ ++/** ++ * SMC_FC_FFA_FEATURES - SMC opcode to check optional feature support ++ * ++ * Register arguments: ++ * ++ * * w1: FF-A function ID ++ * ++ * Return: ++ * * w0: &SMC_FC_FFA_SUCCESS ++ * * w2: Bit[0]: Supports custom buffers for memory transactions. ++ * Bit[1:0]: For RXTX_MAP min buffer size and alignment boundary. ++ * Other bits must be 0. ++ * * w3: For FFA_MEM_RETRIEVE_REQ, bit[7-0]: Number of times receiver can ++ * retrieve each memory region before relinquishing it specified as ++ * ((1U << (value + 1)) - 1 (or value = bits in reference count - 1). ++ * For all other bits and commands: must be 0. ++ * or ++ * ++ * * w0: SMC_FC_FFA_ERROR ++ * * w2: FFA_ERROR_NOT_SUPPORTED if function is not implemented, or ++ * FFA_ERROR_INVALID_PARAMETERS if function id is not valid. 
++ */ ++#define SMC_FC_FFA_FEATURES SMC_FASTCALL_NR_SHARED_MEMORY(0x64) ++ ++/** ++ * SMC_FC_FFA_RXTX_MAP - 32 bit SMC opcode to map message buffers ++ * ++ * Register arguments: ++ * ++ * * w1: TX address ++ * * w2: RX address ++ * * w3: RX/TX page count in bit[5:0] ++ * ++ * Return: ++ * * w0: &SMC_FC_FFA_SUCCESS ++ */ ++#define SMC_FC_FFA_RXTX_MAP SMC_FASTCALL_NR_SHARED_MEMORY(0x66) ++ ++/** ++ * SMC_FC64_FFA_RXTX_MAP - 64 bit SMC opcode to map message buffers ++ * ++ * Register arguments: ++ * ++ * * x1: TX address ++ * * x2: RX address ++ * * x3: RX/TX page count in bit[5:0] ++ * ++ * Return: ++ * * w0: &SMC_FC_FFA_SUCCESS ++ */ ++#define SMC_FC64_FFA_RXTX_MAP SMC_FASTCALL64_NR_SHARED_MEMORY(0x66) ++#ifdef CONFIG_64BIT ++#define SMC_FCZ_FFA_RXTX_MAP SMC_FC64_FFA_RXTX_MAP ++#else ++#define SMC_FCZ_FFA_RXTX_MAP SMC_FC_FFA_RXTX_MAP ++#endif ++ ++/** ++ * SMC_FC_FFA_RXTX_UNMAP - SMC opcode to unmap message buffers ++ * ++ * Register arguments: ++ * ++ * * w1: ID in [31:16] ++ * ++ * Return: ++ * * w0: &SMC_FC_FFA_SUCCESS ++ */ ++#define SMC_FC_FFA_RXTX_UNMAP SMC_FASTCALL_NR_SHARED_MEMORY(0x67) ++ ++/** ++ * SMC_FC_FFA_ID_GET - SMC opcode to get endpoint id of caller ++ * ++ * Return: ++ * * w0: &SMC_FC_FFA_SUCCESS ++ * * w2: ID in bit[15:0], bit[31:16] must be 0. ++ */ ++#define SMC_FC_FFA_ID_GET SMC_FASTCALL_NR_SHARED_MEMORY(0x69) ++ ++/** ++ * SMC_FC_FFA_MEM_DONATE - 32 bit SMC opcode to donate memory ++ * ++ * Not supported. ++ */ ++#define SMC_FC_FFA_MEM_DONATE SMC_FASTCALL_NR_SHARED_MEMORY(0x71) ++ ++/** ++ * SMC_FC_FFA_MEM_LEND - 32 bit SMC opcode to lend memory ++ * ++ * Not currently supported. 
++ */ ++#define SMC_FC_FFA_MEM_LEND SMC_FASTCALL_NR_SHARED_MEMORY(0x72) ++ ++/** ++ * SMC_FC_FFA_MEM_SHARE - 32 bit SMC opcode to share memory ++ * ++ * Register arguments: ++ * ++ * * w1: Total length ++ * * w2: Fragment length ++ * * w3: Address ++ * * w4: Page count ++ * ++ * Return: ++ * * w0: &SMC_FC_FFA_SUCCESS ++ * * w2/w3: Handle ++ * ++ * or ++ * ++ * * w0: &SMC_FC_FFA_MEM_FRAG_RX ++ * * w1-: See &SMC_FC_FFA_MEM_FRAG_RX ++ * ++ * or ++ * ++ * * w0: SMC_FC_FFA_ERROR ++ * * w2: Error code (&enum ffa_error) ++ */ ++#define SMC_FC_FFA_MEM_SHARE SMC_FASTCALL_NR_SHARED_MEMORY(0x73) ++ ++/** ++ * SMC_FC64_FFA_MEM_SHARE - 64 bit SMC opcode to share memory ++ * ++ * Register arguments: ++ * ++ * * w1: Total length ++ * * w2: Fragment length ++ * * x3: Address ++ * * w4: Page count ++ * ++ * Return: ++ * * w0: &SMC_FC_FFA_SUCCESS ++ * * w2/w3: Handle ++ * ++ * or ++ * ++ * * w0: &SMC_FC_FFA_MEM_FRAG_RX ++ * * w1-: See &SMC_FC_FFA_MEM_FRAG_RX ++ * ++ * or ++ * ++ * * w0: SMC_FC_FFA_ERROR ++ * * w2: Error code (&enum ffa_error) ++ */ ++#define SMC_FC64_FFA_MEM_SHARE SMC_FASTCALL64_NR_SHARED_MEMORY(0x73) ++ ++/** ++ * SMC_FC_FFA_MEM_RETRIEVE_REQ - 32 bit SMC opcode to retrieve shared memory ++ * ++ * Register arguments: ++ * ++ * * w1: Total length ++ * * w2: Fragment length ++ * * w3: Address ++ * * w4: Page count ++ * ++ * Return: ++ * * w0: &SMC_FC_FFA_MEM_RETRIEVE_RESP ++ * * w1/x1-w5/x5: See &SMC_FC_FFA_MEM_RETRIEVE_RESP ++ */ ++#define SMC_FC_FFA_MEM_RETRIEVE_REQ SMC_FASTCALL_NR_SHARED_MEMORY(0x74) ++ ++/** ++ * SMC_FC64_FFA_MEM_RETRIEVE_REQ - 64 bit SMC opcode to retrieve shared memory ++ * ++ * Register arguments: ++ * ++ * * w1: Total length ++ * * w2: Fragment length ++ * * x3: Address ++ * * w4: Page count ++ * ++ * Return: ++ * * w0: &SMC_FC_FFA_MEM_RETRIEVE_RESP ++ * * w1/x1-w5/x5: See &SMC_FC_FFA_MEM_RETRIEVE_RESP ++ */ ++#define SMC_FC64_FFA_MEM_RETRIEVE_REQ SMC_FASTCALL64_NR_SHARED_MEMORY(0x74) ++ ++/** ++ * SMC_FC_FFA_MEM_RETRIEVE_RESP - Retrieve 32 
bit SMC return opcode ++ * ++ * Register arguments: ++ * ++ * * w1: Total length ++ * * w2: Fragment length ++ */ ++#define SMC_FC_FFA_MEM_RETRIEVE_RESP SMC_FASTCALL_NR_SHARED_MEMORY(0x75) ++ ++/** ++ * SMC_FC_FFA_MEM_RELINQUISH - SMC opcode to relinquish shared memory ++ * ++ * Input in &struct ffa_mem_relinquish_descriptor format in message buffer. ++ * ++ * Return: ++ * * w0: &SMC_FC_FFA_SUCCESS ++ */ ++#define SMC_FC_FFA_MEM_RELINQUISH SMC_FASTCALL_NR_SHARED_MEMORY(0x76) ++ ++/** ++ * SMC_FC_FFA_MEM_RECLAIM - SMC opcode to reclaim shared memory ++ * ++ * Register arguments: ++ * ++ * * w1/w2: Handle ++ * * w3: Flags ++ * ++ * Return: ++ * * w0: &SMC_FC_FFA_SUCCESS ++ */ ++#define SMC_FC_FFA_MEM_RECLAIM SMC_FASTCALL_NR_SHARED_MEMORY(0x77) ++ ++/** ++ * SMC_FC_FFA_MEM_FRAG_RX - SMC opcode to request next fragment. ++ * ++ * Register arguments: ++ * ++ * * w1/w2: Cookie ++ * * w3: Fragment offset. ++ * * w4: Endpoint id ID in [31:16], if client is hypervisor. ++ * ++ * Return: ++ * * w0: &SMC_FC_FFA_MEM_FRAG_TX ++ * * w1/x1-w5/x5: See &SMC_FC_FFA_MEM_FRAG_TX ++ */ ++#define SMC_FC_FFA_MEM_FRAG_RX SMC_FASTCALL_NR_SHARED_MEMORY(0x7A) ++ ++/** ++ * SMC_FC_FFA_MEM_FRAG_TX - SMC opcode to transmit next fragment ++ * ++ * Register arguments: ++ * ++ * * w1/w2: Cookie ++ * * w3: Fragment length. ++ * * w4: Sender endpoint id ID in [31:16], if client is hypervisor. ++ * ++ * Return: ++ * * w0: &SMC_FC_FFA_MEM_FRAG_RX or &SMC_FC_FFA_SUCCESS. ++ * * w1/x1-w5/x5: See opcode in w0. ++ */ ++#define SMC_FC_FFA_MEM_FRAG_TX SMC_FASTCALL_NR_SHARED_MEMORY(0x7B) ++ ++#endif /* __LINUX_TRUSTY_ARM_FFA_H */ +diff --git a/include/linux/trusty/sm_err.h b/include/linux/trusty/sm_err.h +new file mode 100644 +index 000000000000..f6504448c6c3 +--- /dev/null ++++ b/include/linux/trusty/sm_err.h +@@ -0,0 +1,28 @@ ++/* SPDX-License-Identifier: MIT */ ++/* ++ * Copyright (c) 2013 Google Inc. All rights reserved ++ * ++ * Trusty and TF-A also have a copy of this header. 
++ * Please keep the copies in sync. ++ */ ++#ifndef __LINUX_TRUSTY_SM_ERR_H ++#define __LINUX_TRUSTY_SM_ERR_H ++ ++/* Errors from the secure monitor */ ++#define SM_ERR_UNDEFINED_SMC 0xFFFFFFFF /* Unknown SMC (defined by ARM DEN 0028A(0.9.0) */ ++#define SM_ERR_INVALID_PARAMETERS -2 ++#define SM_ERR_INTERRUPTED -3 /* Got interrupted. Call back with restart SMC */ ++#define SM_ERR_UNEXPECTED_RESTART -4 /* Got an restart SMC when we didn't expect it */ ++#define SM_ERR_BUSY -5 /* Temporarily busy. Call back with original args */ ++#define SM_ERR_INTERLEAVED_SMC -6 /* Got a trusted_service SMC when a restart SMC is required */ ++#define SM_ERR_INTERNAL_FAILURE -7 /* Unknown error */ ++#define SM_ERR_NOT_SUPPORTED -8 ++#define SM_ERR_NOT_ALLOWED -9 /* SMC call not allowed */ ++#define SM_ERR_END_OF_INPUT -10 ++#define SM_ERR_PANIC -11 /* Secure OS crashed */ ++#define SM_ERR_FIQ_INTERRUPTED -12 /* Got interrupted by FIQ. Call back with SMC_SC_RESTART_FIQ on same CPU */ ++#define SM_ERR_CPU_IDLE -13 /* SMC call waiting for another CPU */ ++#define SM_ERR_NOP_INTERRUPTED -14 /* Got interrupted. Call back with new SMC_SC_NOP */ ++#define SM_ERR_NOP_DONE -15 /* Cpu idle after SMC_SC_NOP (not an error) */ ++ ++#endif +diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h +new file mode 100644 +index 000000000000..aea3f6068593 +--- /dev/null ++++ b/include/linux/trusty/smcall.h +@@ -0,0 +1,124 @@ ++/* SPDX-License-Identifier: MIT */ ++/* ++ * Copyright (c) 2013-2014 Google Inc. All rights reserved ++ * ++ * Trusty and TF-A also have a copy of this header. ++ * Please keep the copies in sync. 
++ */ ++#ifndef __LINUX_TRUSTY_SMCALL_H ++#define __LINUX_TRUSTY_SMCALL_H ++ ++#define SMC_NUM_ENTITIES 64 ++#define SMC_NUM_ARGS 4 ++#define SMC_NUM_PARAMS (SMC_NUM_ARGS - 1) ++ ++#define SMC_IS_FASTCALL(smc_nr) ((smc_nr) & 0x80000000) ++#define SMC_IS_SMC64(smc_nr) ((smc_nr) & 0x40000000) ++#define SMC_ENTITY(smc_nr) (((smc_nr) & 0x3F000000) >> 24) ++#define SMC_FUNCTION(smc_nr) ((smc_nr) & 0x0000FFFF) ++ ++#define SMC_NR(entity, fn, fastcall, smc64) ((((fastcall) & 0x1U) << 31) | \ ++ (((smc64) & 0x1U) << 30) | \ ++ (((entity) & 0x3FU) << 24) | \ ++ ((fn) & 0xFFFFU) \ ++ ) ++ ++#define SMC_FASTCALL_NR(entity, fn) SMC_NR((entity), (fn), 1, 0) ++#define SMC_STDCALL_NR(entity, fn) SMC_NR((entity), (fn), 0, 0) ++#define SMC_FASTCALL64_NR(entity, fn) SMC_NR((entity), (fn), 1, 1) ++#define SMC_STDCALL64_NR(entity, fn) SMC_NR((entity), (fn), 0, 1) ++ ++#define SMC_ENTITY_ARCH 0 /* ARM Architecture calls */ ++#define SMC_ENTITY_CPU 1 /* CPU Service calls */ ++#define SMC_ENTITY_SIP 2 /* SIP Service calls */ ++#define SMC_ENTITY_OEM 3 /* OEM Service calls */ ++#define SMC_ENTITY_STD 4 /* Standard Service calls */ ++#define SMC_ENTITY_RESERVED 5 /* Reserved for future use */ ++#define SMC_ENTITY_TRUSTED_APP 48 /* Trusted Application calls */ ++#define SMC_ENTITY_TRUSTED_OS 50 /* Trusted OS calls */ ++#define SMC_ENTITY_LOGGING 51 /* Used for secure -> nonsecure logging */ ++#define SMC_ENTITY_TEST 52 /* Used for secure -> nonsecure tests */ ++#define SMC_ENTITY_SECURE_MONITOR 60 /* Trusted OS calls internal to secure monitor */ ++ ++/* FC = Fast call, SC = Standard call */ ++#define SMC_SC_RESTART_LAST SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0) ++#define SMC_SC_LOCKED_NOP SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 1) ++ ++/** ++ * SMC_SC_RESTART_FIQ - Re-enter trusty after it was interrupted by an fiq ++ * ++ * No arguments, no return value. ++ * ++ * Re-enter trusty after returning to ns to process an fiq. Must be called iff ++ * trusty returns SM_ERR_FIQ_INTERRUPTED. 
++ * ++ * Enable by selecting api version TRUSTY_API_VERSION_RESTART_FIQ (1) or later. ++ */ ++#define SMC_SC_RESTART_FIQ SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 2) ++ ++/** ++ * SMC_SC_NOP - Enter trusty to run pending work. ++ * ++ * No arguments. ++ * ++ * Returns SM_ERR_NOP_INTERRUPTED or SM_ERR_NOP_DONE. ++ * If SM_ERR_NOP_INTERRUPTED is returned, the call must be repeated. ++ * ++ * Enable by selecting api version TRUSTY_API_VERSION_SMP (2) or later. ++ */ ++#define SMC_SC_NOP SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 3) ++ ++/* ++ * Return from secure os to non-secure os with return value in r1 ++ */ ++#define SMC_SC_NS_RETURN SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0) ++ ++#define SMC_FC_RESERVED SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0) ++#define SMC_FC_FIQ_EXIT SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 1) ++#define SMC_FC_REQUEST_FIQ SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 2) ++ ++#define TRUSTY_IRQ_TYPE_NORMAL (0) ++#define TRUSTY_IRQ_TYPE_PER_CPU (1) ++#define TRUSTY_IRQ_TYPE_DOORBELL (2) ++#define SMC_FC_GET_NEXT_IRQ SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 3) ++ ++#define SMC_FC_CPU_SUSPEND SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 7) ++#define SMC_FC_CPU_RESUME SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 8) ++ ++#define SMC_FC_AARCH_SWITCH SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 9) ++#define SMC_FC_GET_VERSION_STR SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 10) ++ ++/** ++ * SMC_FC_API_VERSION - Find and select supported API version. ++ * ++ * @r1: Version supported by client. ++ * ++ * Returns version supported by trusty. ++ * ++ * If multiple versions are supported, the client should start by calling ++ * SMC_FC_API_VERSION with the largest version it supports. Trusty will then ++ * return a version it supports. 
If the client does not support the version ++ * returned by trusty and the version returned is less than the version ++ * requested, repeat the call with the largest supported version less than the ++ * last returned version. ++ * ++ * This call must be made before any calls that are affected by the api version. ++ */ ++#define TRUSTY_API_VERSION_RESTART_FIQ (1) ++#define TRUSTY_API_VERSION_SMP (2) ++#define TRUSTY_API_VERSION_SMP_NOP (3) ++#define TRUSTY_API_VERSION_PHYS_MEM_OBJ (4) ++#define TRUSTY_API_VERSION_MEM_OBJ (5) ++#define TRUSTY_API_VERSION_CURRENT (5) ++#define SMC_FC_API_VERSION SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 11) ++ ++/* TRUSTED_OS entity calls */ ++#define SMC_SC_VIRTIO_GET_DESCR SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 20) ++#define SMC_SC_VIRTIO_START SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 21) ++#define SMC_SC_VIRTIO_STOP SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 22) ++ ++#define SMC_SC_VDEV_RESET SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 23) ++#define SMC_SC_VDEV_KICK_VQ SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 24) ++#define SMC_NC_VDEV_KICK_VQ SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 25) ++ ++#endif /* __LINUX_TRUSTY_SMCALL_H */ +diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h +new file mode 100644 +index 000000000000..efbb36999a8b +--- /dev/null ++++ b/include/linux/trusty/trusty.h +@@ -0,0 +1,131 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright (C) 2013 Google, Inc. 
++ */ ++#ifndef __LINUX_TRUSTY_TRUSTY_H ++#define __LINUX_TRUSTY_TRUSTY_H ++ ++#include <linux/kernel.h> ++#include <linux/trusty/sm_err.h> ++#include <linux/types.h> ++#include <linux/device.h> ++#include <linux/pagemap.h> ++ ++ ++#if IS_ENABLED(CONFIG_TRUSTY) ++s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2); ++s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2); ++#ifdef CONFIG_64BIT ++s64 trusty_fast_call64(struct device *dev, u64 smcnr, u64 a0, u64 a1, u64 a2); ++#endif ++#else ++static inline s32 trusty_std_call32(struct device *dev, u32 smcnr, ++ u32 a0, u32 a1, u32 a2) ++{ ++ return SM_ERR_UNDEFINED_SMC; ++} ++static inline s32 trusty_fast_call32(struct device *dev, u32 smcnr, ++ u32 a0, u32 a1, u32 a2) ++{ ++ return SM_ERR_UNDEFINED_SMC; ++} ++#ifdef CONFIG_64BIT ++static inline s64 trusty_fast_call64(struct device *dev, ++ u64 smcnr, u64 a0, u64 a1, u64 a2) ++{ ++ return SM_ERR_UNDEFINED_SMC; ++} ++#endif ++#endif ++ ++struct notifier_block; ++enum { ++ TRUSTY_CALL_PREPARE, ++ TRUSTY_CALL_RETURNED, ++}; ++int trusty_call_notifier_register(struct device *dev, ++ struct notifier_block *n); ++int trusty_call_notifier_unregister(struct device *dev, ++ struct notifier_block *n); ++const char *trusty_version_str_get(struct device *dev); ++u32 trusty_get_api_version(struct device *dev); ++bool trusty_get_panic_status(struct device *dev); ++ ++struct ns_mem_page_info { ++ u64 paddr; ++ u8 ffa_mem_attr; ++ u8 ffa_mem_perm; ++ u64 compat_attr; ++}; ++ ++int trusty_encode_page_info(struct ns_mem_page_info *inf, ++ struct page *page, pgprot_t pgprot); ++ ++struct scatterlist; ++typedef u64 trusty_shared_mem_id_t; ++int trusty_share_memory(struct device *dev, trusty_shared_mem_id_t *id, ++ struct scatterlist *sglist, unsigned int nents, ++ pgprot_t pgprot); ++int trusty_share_memory_compat(struct device *dev, trusty_shared_mem_id_t *id, ++ struct scatterlist *sglist, unsigned int nents, ++ pgprot_t pgprot); ++int 
trusty_transfer_memory(struct device *dev, u64 *id, ++ struct scatterlist *sglist, unsigned int nents, ++ pgprot_t pgprot, u64 tag, bool lend); ++int trusty_reclaim_memory(struct device *dev, trusty_shared_mem_id_t id, ++ struct scatterlist *sglist, unsigned int nents); ++ ++struct dma_buf; ++#ifdef CONFIG_TRUSTY_DMA_BUF_FFA_TAG ++u64 trusty_dma_buf_get_ffa_tag(struct dma_buf *dma_buf); ++#else ++static inline u64 trusty_dma_buf_get_ffa_tag(struct dma_buf *dma_buf) ++{ ++ return 0; ++} ++#endif ++ ++/* Invalid handle value is defined by FF-A spec */ ++#ifdef CONFIG_TRUSTY_DMA_BUF_SHARED_MEM_ID ++/** ++ * trusty_dma_buf_get_shared_mem_id() - Get memory ID corresponding to a dma_buf ++ * @dma_buf: DMA buffer ++ * @id: Pointer to output trusty_shared_mem_id_t ++ * ++ * Sets @id to trusty_shared_mem_id_t corresponding to the given @dma_buf. ++ * @dma_buf "owns" the ID, i.e. is responsible for allocating/releasing it. ++ * @dma_buf with an allocated @id must be in secure memory and should only be ++ * sent to Trusty using TRUSTY_SEND_SECURE. ++ * ++ * Return: ++ * * 0 - success ++ * * -ENODATA - @dma_buf does not own a trusty_shared_mem_id_t ++ * * ... 
- @dma_buf should not be lent or shared ++ */ ++int trusty_dma_buf_get_shared_mem_id(struct dma_buf *dma_buf, ++ trusty_shared_mem_id_t *id); ++#else ++static inline int trusty_dma_buf_get_shared_mem_id(struct dma_buf *dma_buf, ++ trusty_shared_mem_id_t *id) ++{ ++ return -ENODATA; ++} ++#endif ++ ++struct trusty_nop { ++ struct list_head node; ++ u32 args[3]; ++}; ++ ++static inline void trusty_nop_init(struct trusty_nop *nop, ++ u32 arg0, u32 arg1, u32 arg2) { ++ INIT_LIST_HEAD(&nop->node); ++ nop->args[0] = arg0; ++ nop->args[1] = arg1; ++ nop->args[2] = arg2; ++} ++ ++void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop); ++void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop); ++ ++#endif +diff --git a/include/linux/trusty/trusty_ipc.h b/include/linux/trusty/trusty_ipc.h +new file mode 100644 +index 000000000000..9386392f3a64 +--- /dev/null ++++ b/include/linux/trusty/trusty_ipc.h +@@ -0,0 +1,89 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright (C) 2015 Google, Inc. 
++ */ ++#ifndef __LINUX_TRUSTY_TRUSTY_IPC_H ++#define __LINUX_TRUSTY_TRUSTY_IPC_H ++ ++#include <linux/list.h> ++#include <linux/scatterlist.h> ++#include <linux/trusty/trusty.h> ++#include <linux/types.h> ++ ++struct tipc_chan; ++ ++struct tipc_msg_buf { ++ void *buf_va; ++ struct scatterlist sg; ++ trusty_shared_mem_id_t buf_id; ++ size_t buf_sz; ++ size_t wpos; ++ size_t rpos; ++ size_t shm_cnt; ++ struct list_head node; ++}; ++ ++enum tipc_chan_event { ++ TIPC_CHANNEL_CONNECTED = 1, ++ TIPC_CHANNEL_DISCONNECTED, ++ TIPC_CHANNEL_SHUTDOWN, ++}; ++ ++struct tipc_chan_ops { ++ void (*handle_event)(void *cb_arg, int event); ++ struct tipc_msg_buf *(*handle_msg)(void *cb_arg, ++ struct tipc_msg_buf *mb); ++ void (*handle_release)(void *cb_arg); ++}; ++ ++struct tipc_chan *tipc_create_channel(struct device *dev, ++ const struct tipc_chan_ops *ops, ++ void *cb_arg); ++ ++int tipc_chan_connect(struct tipc_chan *chan, const char *port); ++ ++int tipc_chan_queue_msg(struct tipc_chan *chan, struct tipc_msg_buf *mb); ++ ++int tipc_chan_shutdown(struct tipc_chan *chan); ++ ++void tipc_chan_destroy(struct tipc_chan *chan); ++ ++struct tipc_msg_buf *tipc_chan_get_rxbuf(struct tipc_chan *chan); ++ ++void tipc_chan_put_rxbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb); ++ ++struct tipc_msg_buf * ++tipc_chan_get_txbuf_timeout(struct tipc_chan *chan, long timeout); ++ ++void tipc_chan_put_txbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb); ++ ++static inline size_t mb_avail_space(struct tipc_msg_buf *mb) ++{ ++ return mb->buf_sz - mb->wpos; ++} ++ ++static inline size_t mb_avail_data(struct tipc_msg_buf *mb) ++{ ++ return mb->wpos - mb->rpos; ++} ++ ++static inline void *mb_put_data(struct tipc_msg_buf *mb, size_t len) ++{ ++ void *pos = (u8 *)mb->buf_va + mb->wpos; ++ ++ BUG_ON(mb->wpos + len > mb->buf_sz); ++ mb->wpos += len; ++ return pos; ++} ++ ++static inline void *mb_get_data(struct tipc_msg_buf *mb, size_t len) ++{ ++ void *pos = (u8 *)mb->buf_va + mb->rpos; ++ ++ 
BUG_ON(mb->rpos + len > mb->wpos); ++ mb->rpos += len; ++ return pos; ++} ++ ++#endif /* __LINUX_TRUSTY_TRUSTY_IPC_H */ ++ +diff --git a/include/uapi/linux/trusty/ipc.h b/include/uapi/linux/trusty/ipc.h +new file mode 100644 +index 000000000000..af91035484f1 +--- /dev/null ++++ b/include/uapi/linux/trusty/ipc.h +@@ -0,0 +1,65 @@ ++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ ++ ++#ifndef _UAPI_LINUX_TRUSTY_IPC_H_ ++#define _UAPI_LINUX_TRUSTY_IPC_H_ ++ ++#include <linux/ioctl.h> ++#include <linux/types.h> ++#include <linux/uio.h> ++ ++/** ++ * enum transfer_kind - How to send an fd to Trusty ++ * @TRUSTY_SHARE: Memory will be accessible by Linux and Trusty. On ARM it ++ * will be mapped as nonsecure. Suitable for shared memory. ++ * The paired fd must be a "dma_buf". ++ * @TRUSTY_LEND: Memory will be accessible only to Trusty. On ARM it will ++ * be transitioned to "Secure" memory if Trusty is in ++ * TrustZone. This transfer kind is suitable for donating ++ * video buffers or other similar resources. The paired fd ++ * may need to come from a platform-specific allocator for ++ * memory that may be transitioned to "Secure". ++ * @TRUSTY_SEND_SECURE: Send memory that is already "Secure". Memory will be ++ * accessible only to Trusty. The paired fd may need to ++ * come from a platform-specific allocator that returns ++ * "Secure" buffers. ++ * ++ * Describes how the user would like the resource in question to be sent to ++ * Trusty. Options may be valid only for certain kinds of fds. 
++ */ ++enum transfer_kind { ++ TRUSTY_SHARE = 0, ++ TRUSTY_LEND = 1, ++ TRUSTY_SEND_SECURE = 2, ++}; ++ ++/** ++ * struct trusty_shm - Describes a transfer of memory to Trusty ++ * @fd: The fd to transfer ++ * @transfer: How to transfer it - see &enum transfer_kind ++ */ ++struct trusty_shm { ++ __s32 fd; ++ __u32 transfer; ++}; ++ ++/** ++ * struct tipc_send_msg_req - Request struct for @TIPC_IOC_SEND_MSG ++ * @iov: Pointer to an array of &struct iovec describing data to be sent ++ * @shm: Pointer to an array of &struct trusty_shm describing any file ++ * descriptors to be transferred. ++ * @iov_cnt: Number of elements in the @iov array ++ * @shm_cnt: Number of elements in the @shm array ++ */ ++struct tipc_send_msg_req { ++ __u64 iov; ++ __u64 shm; ++ __u64 iov_cnt; ++ __u64 shm_cnt; ++}; ++ ++#define TIPC_IOC_MAGIC 'r' ++#define TIPC_IOC_CONNECT _IOW(TIPC_IOC_MAGIC, 0x80, char *) ++#define TIPC_IOC_SEND_MSG _IOW(TIPC_IOC_MAGIC, 0x81, \ ++ struct tipc_send_msg_req) ++ ++#endif +diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h +index b052355ac7a3..cf6b95d9a1ec 100644 +--- a/include/uapi/linux/virtio_ids.h ++++ b/include/uapi/linux/virtio_ids.h +@@ -39,6 +39,7 @@ + #define VIRTIO_ID_9P 9 /* 9p virtio console */ + #define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */ + #define VIRTIO_ID_CAIF 12 /* Virtio caif */ ++#define VIRTIO_ID_TRUSTY_IPC 13 /* virtio trusty ipc */ + #define VIRTIO_ID_GPU 16 /* virtio GPU */ + #define VIRTIO_ID_INPUT 18 /* virtio input */ + #define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */ +-- +2.30.2 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0036-ANDROID-trusty-Remove-FFA-specific-initilization.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0036-ANDROID-trusty-Remove-FFA-specific-initilization.patch new file mode 100644 index 0000000000..6dd8af26aa --- /dev/null +++ 
b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0036-ANDROID-trusty-Remove-FFA-specific-initilization.patch @@ -0,0 +1,1076 @@ +From 8318af58a0f5d29352d3c84be6b20fe6d1ca352f Mon Sep 17 00:00:00 2001 +From: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Date: Fri, 14 Jan 2022 13:41:26 +0000 +Subject: [PATCH 23/32] ANDROID: trusty: Remove FFA specific initilization + +Remove FFA specific initialization and its arm_ffa.h file from Trusty +driver. These changes are done so that Trusty can use ARM FFA driver +and its related header files. + +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Change-Id: Iaad473659de94930cdf78cd7201f016d59cee8d7 +Upstream-Status: Pending [Not submitted to upstream yet] +--- + drivers/trusty/trusty-mem.c | 37 --- + drivers/trusty/trusty.c | 286 +--------------- + include/linux/trusty/arm_ffa.h | 590 --------------------------------- + include/linux/trusty/trusty.h | 3 - + 4 files changed, 3 insertions(+), 913 deletions(-) + delete mode 100644 include/linux/trusty/arm_ffa.h + +diff --git a/drivers/trusty/trusty-mem.c b/drivers/trusty/trusty-mem.c +index 8a360298e501..7775ff76c38c 100644 +--- a/drivers/trusty/trusty-mem.c ++++ b/drivers/trusty/trusty-mem.c +@@ -5,7 +5,6 @@ + + #include <linux/types.h> + #include <linux/printk.h> +-#include <linux/trusty/arm_ffa.h> + #include <linux/trusty/trusty.h> + #include <linux/trusty/smcall.h> + +@@ -75,8 +74,6 @@ int trusty_encode_page_info(struct ns_mem_page_info *inf, + { + int mem_attr; + u64 pte; +- u8 ffa_mem_attr; +- u8 ffa_mem_perm = 0; + + if (!inf || !page) + return -EINVAL; +@@ -89,30 +86,6 @@ int trusty_encode_page_info(struct ns_mem_page_info *inf, + if (mem_attr < 0) + return mem_attr; + +- switch (mem_attr) { +- case MEM_ATTR_STRONGLY_ORDERED: +- ffa_mem_attr = FFA_MEM_ATTR_DEVICE_NGNRNE; +- break; +- +- case MEM_ATTR_DEVICE: +- ffa_mem_attr = FFA_MEM_ATTR_DEVICE_NGNRE; +- break; +- +- case MEM_ATTR_NORMAL_NON_CACHEABLE: +- ffa_mem_attr = 
FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED; +- break; +- +- case MEM_ATTR_NORMAL_WRITE_BACK_READ_ALLOCATE: +- case MEM_ATTR_NORMAL_WRITE_BACK_WRITE_ALLOCATE: +- ffa_mem_attr = FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB; +- break; +- +- default: +- return -EINVAL; +- } +- +- inf->paddr = pte; +- + /* add other attributes */ + #if defined(CONFIG_ARM64) || defined(CONFIG_ARM_LPAE) + pte |= pgprot_val(pgprot); +@@ -123,16 +96,6 @@ int trusty_encode_page_info(struct ns_mem_page_info *inf, + pte |= ATTR_INNER_SHAREABLE; /* inner sharable */ + #endif + +- if (!(pte & ATTR_RDONLY)) +- ffa_mem_perm |= FFA_MEM_PERM_RW; +- else +- ffa_mem_perm |= FFA_MEM_PERM_RO; +- +- if ((pte & ATTR_INNER_SHAREABLE) == ATTR_INNER_SHAREABLE) +- ffa_mem_attr |= FFA_MEM_ATTR_INNER_SHAREABLE; +- +- inf->ffa_mem_attr = ffa_mem_attr; +- inf->ffa_mem_perm = ffa_mem_perm; + inf->compat_attr = (pte & 0x0000FFFFFFFFFFFFull) | + ((u64)mem_attr << 48); + return 0; +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 265eab52aea0..2dec75398f69 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -11,7 +11,6 @@ + #include <linux/slab.h> + #include <linux/stat.h> + #include <linux/string.h> +-#include <linux/trusty/arm_ffa.h> + #include <linux/trusty/smcall.h> + #include <linux/trusty/sm_err.h> + #include <linux/trusty/trusty.h> +@@ -42,11 +41,6 @@ struct trusty_state { + struct list_head nop_queue; + spinlock_t nop_lock; /* protects nop_queue */ + struct device_dma_parameters dma_parms; +- void *ffa_tx; +- void *ffa_rx; +- u16 ffa_local_id; +- u16 ffa_remote_id; +- struct mutex share_memory_msg_lock; /* protects share_memory_msg */ + }; + + static inline unsigned long smc(unsigned long r0, unsigned long r1, +@@ -246,19 +240,6 @@ int trusty_transfer_memory(struct device *dev, u64 *id, + struct ns_mem_page_info pg_inf; + struct scatterlist *sg; + size_t count; +- size_t i; +- size_t len; +- u64 ffa_handle = 0; +- size_t total_len; +- size_t endpoint_count = 1; +- struct ffa_mtd 
*mtd = s->ffa_tx; +- size_t comp_mrd_offset = offsetof(struct ffa_mtd, emad[endpoint_count]); +- struct ffa_comp_mrd *comp_mrd = s->ffa_tx + comp_mrd_offset; +- struct ffa_cons_mrd *cons_mrd = comp_mrd->address_range_array; +- size_t cons_mrd_offset = (void *)cons_mrd - s->ffa_tx; +- struct smc_ret8 smc_ret; +- u32 cookie_low; +- u32 cookie_high; + + if (WARN_ON(dev->driver != &trusty_driver.driver)) + return -EINVAL; +@@ -284,126 +265,11 @@ int trusty_transfer_memory(struct device *dev, u64 *id, + if (ret) { + dev_err(s->dev, "%s: trusty_encode_page_info failed\n", + __func__); +- goto err_encode_page_info; +- } +- +- if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ) { +- *id = pg_inf.compat_attr; +- return 0; +- } +- +- len = 0; +- for_each_sg(sglist, sg, nents, i) +- len += sg_dma_len(sg); +- +- mutex_lock(&s->share_memory_msg_lock); +- +- mtd->sender_id = s->ffa_local_id; +- mtd->memory_region_attributes = pg_inf.ffa_mem_attr; +- mtd->reserved_3 = 0; +- mtd->flags = 0; +- mtd->handle = 0; +- mtd->tag = tag; +- mtd->reserved_24_27 = 0; +- mtd->emad_count = endpoint_count; +- for (i = 0; i < endpoint_count; i++) { +- struct ffa_emad *emad = &mtd->emad[i]; +- /* TODO: support stream ids */ +- emad->mapd.endpoint_id = s->ffa_remote_id; +- emad->mapd.memory_access_permissions = pg_inf.ffa_mem_perm; +- emad->mapd.flags = 0; +- emad->comp_mrd_offset = comp_mrd_offset; +- emad->reserved_8_15 = 0; +- } +- comp_mrd->total_page_count = len / PAGE_SIZE; +- comp_mrd->address_range_count = nents; +- comp_mrd->reserved_8_15 = 0; +- +- total_len = cons_mrd_offset + nents * sizeof(*cons_mrd); +- sg = sglist; +- while (count) { +- size_t lcount = +- min_t(size_t, count, (PAGE_SIZE - cons_mrd_offset) / +- sizeof(*cons_mrd)); +- size_t fragment_len = lcount * sizeof(*cons_mrd) + +- cons_mrd_offset; +- +- for (i = 0; i < lcount; i++) { +- cons_mrd[i].address = sg_dma_address(sg); +- cons_mrd[i].page_count = sg_dma_len(sg) / PAGE_SIZE; +- cons_mrd[i].reserved_12_15 = 0; +- sg = 
sg_next(sg); +- } +- count -= lcount; +- if (cons_mrd_offset) { +- u32 smc = lend ? SMC_FC_FFA_MEM_LEND : +- SMC_FC_FFA_MEM_SHARE; +- /* First fragment */ +- smc_ret = trusty_smc8(smc, total_len, +- fragment_len, 0, 0, 0, 0, 0); +- } else { +- smc_ret = trusty_smc8(SMC_FC_FFA_MEM_FRAG_TX, +- cookie_low, cookie_high, +- fragment_len, 0, 0, 0, 0); +- } +- if (smc_ret.r0 == SMC_FC_FFA_MEM_FRAG_RX) { +- cookie_low = smc_ret.r1; +- cookie_high = smc_ret.r2; +- dev_dbg(s->dev, "cookie %x %x", cookie_low, +- cookie_high); +- if (!count) { +- /* +- * We have sent all our descriptors. Expected +- * SMC_FC_FFA_SUCCESS, not a request to send +- * another fragment. +- */ +- dev_err(s->dev, "%s: fragment_len %zd/%zd, unexpected SMC_FC_FFA_MEM_FRAG_RX\n", +- __func__, fragment_len, total_len); +- ret = -EIO; +- break; +- } +- } else if (smc_ret.r0 == SMC_FC_FFA_SUCCESS) { +- ffa_handle = smc_ret.r2 | (u64)smc_ret.r3 << 32; +- dev_dbg(s->dev, "%s: fragment_len %zu/%zu, got handle 0x%llx\n", +- __func__, fragment_len, total_len, +- ffa_handle); +- if (count) { +- /* +- * We have not sent all our descriptors. +- * Expected SMC_FC_FFA_MEM_FRAG_RX not +- * SMC_FC_FFA_SUCCESS. 
+- */ +- dev_err(s->dev, "%s: fragment_len %zu/%zu, unexpected SMC_FC_FFA_SUCCESS, count %zu != 0\n", +- __func__, fragment_len, total_len, +- count); +- ret = -EIO; +- break; +- } +- } else { +- dev_err(s->dev, "%s: fragment_len %zu/%zu, SMC_FC_FFA_MEM_SHARE failed 0x%lx 0x%lx 0x%lx", +- __func__, fragment_len, total_len, +- smc_ret.r0, smc_ret.r1, smc_ret.r2); +- ret = -EIO; +- break; +- } +- +- cons_mrd = s->ffa_tx; +- cons_mrd_offset = 0; +- } +- +- mutex_unlock(&s->share_memory_msg_lock); +- +- if (!ret) { +- *id = ffa_handle; +- dev_dbg(s->dev, "%s: done\n", __func__); +- return 0; ++ return ret; + } + +- dev_err(s->dev, "%s: failed %d", __func__, ret); +- +-err_encode_page_info: +- dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL); +- return ret; ++ *id = pg_inf.compat_attr; ++ return 0; + } + EXPORT_SYMBOL(trusty_transfer_memory); + +@@ -433,8 +299,6 @@ int trusty_reclaim_memory(struct device *dev, u64 id, + struct scatterlist *sglist, unsigned int nents) + { + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); +- int ret = 0; +- struct smc_ret8 smc_ret; + + if (WARN_ON(dev->driver != &trusty_driver.driver)) + return -EINVAL; +@@ -454,28 +318,6 @@ int trusty_reclaim_memory(struct device *dev, u64 id, + return 0; + } + +- mutex_lock(&s->share_memory_msg_lock); +- +- smc_ret = trusty_smc8(SMC_FC_FFA_MEM_RECLAIM, (u32)id, id >> 32, 0, 0, +- 0, 0, 0); +- if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) { +- dev_err(s->dev, "%s: SMC_FC_FFA_MEM_RECLAIM failed 0x%lx 0x%lx 0x%lx", +- __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2); +- if (smc_ret.r0 == SMC_FC_FFA_ERROR && +- smc_ret.r2 == FFA_ERROR_DENIED) +- ret = -EBUSY; +- else +- ret = -EIO; +- } +- +- mutex_unlock(&s->share_memory_msg_lock); +- +- if (ret != 0) +- return ret; +- +- dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL); +- +- dev_dbg(s->dev, "%s: done\n", __func__); + return 0; + } + EXPORT_SYMBOL(trusty_reclaim_memory); +@@ -527,118 +369,6 @@ const char *trusty_version_str_get(struct 
device *dev) + } + EXPORT_SYMBOL(trusty_version_str_get); + +-static int trusty_init_msg_buf(struct trusty_state *s, struct device *dev) +-{ +- phys_addr_t tx_paddr; +- phys_addr_t rx_paddr; +- int ret; +- struct smc_ret8 smc_ret; +- +- if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ) +- return 0; +- +- /* Get supported FF-A version and check if it is compatible */ +- smc_ret = trusty_smc8(SMC_FC_FFA_VERSION, FFA_CURRENT_VERSION, 0, 0, +- 0, 0, 0, 0); +- if (FFA_VERSION_TO_MAJOR(smc_ret.r0) != FFA_CURRENT_VERSION_MAJOR) { +- dev_err(s->dev, +- "%s: Unsupported FF-A version 0x%lx, expected 0x%x\n", +- __func__, smc_ret.r0, FFA_CURRENT_VERSION); +- ret = -EIO; +- goto err_version; +- } +- +- /* Check that SMC_FC_FFA_MEM_SHARE is implemented */ +- smc_ret = trusty_smc8(SMC_FC_FFA_FEATURES, SMC_FC_FFA_MEM_SHARE, 0, 0, +- 0, 0, 0, 0); +- if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) { +- dev_err(s->dev, +- "%s: SMC_FC_FFA_FEATURES(SMC_FC_FFA_MEM_SHARE) failed 0x%lx 0x%lx 0x%lx\n", +- __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2); +- ret = -EIO; +- goto err_features; +- } +- +- /* +- * Set FF-A endpoint IDs. +- * +- * Hardcode 0x8000 for the secure os. 
+- * TODO: Use FF-A call or device tree to configure this dynamically +- */ +- smc_ret = trusty_smc8(SMC_FC_FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0); +- if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) { +- dev_err(s->dev, +- "%s: SMC_FC_FFA_ID_GET failed 0x%lx 0x%lx 0x%lx\n", +- __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2); +- ret = -EIO; +- goto err_id_get; +- } +- +- s->ffa_local_id = smc_ret.r2; +- s->ffa_remote_id = 0x8000; +- +- s->ffa_tx = kmalloc(PAGE_SIZE, GFP_KERNEL); +- if (!s->ffa_tx) { +- ret = -ENOMEM; +- goto err_alloc_tx; +- } +- tx_paddr = virt_to_phys(s->ffa_tx); +- if (WARN_ON(tx_paddr & (PAGE_SIZE - 1))) { +- ret = -EINVAL; +- goto err_unaligned_tx_buf; +- } +- +- s->ffa_rx = kmalloc(PAGE_SIZE, GFP_KERNEL); +- if (!s->ffa_rx) { +- ret = -ENOMEM; +- goto err_alloc_rx; +- } +- rx_paddr = virt_to_phys(s->ffa_rx); +- if (WARN_ON(rx_paddr & (PAGE_SIZE - 1))) { +- ret = -EINVAL; +- goto err_unaligned_rx_buf; +- } +- +- smc_ret = trusty_smc8(SMC_FCZ_FFA_RXTX_MAP, tx_paddr, rx_paddr, 1, 0, +- 0, 0, 0); +- if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) { +- dev_err(s->dev, "%s: SMC_FCZ_FFA_RXTX_MAP failed 0x%lx 0x%lx 0x%lx\n", +- __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2); +- ret = -EIO; +- goto err_rxtx_map; +- } +- +- return 0; +- +-err_rxtx_map: +-err_unaligned_rx_buf: +- kfree(s->ffa_rx); +- s->ffa_rx = NULL; +-err_alloc_rx: +-err_unaligned_tx_buf: +- kfree(s->ffa_tx); +- s->ffa_tx = NULL; +-err_alloc_tx: +-err_id_get: +-err_features: +-err_version: +- return ret; +-} +- +-static void trusty_free_msg_buf(struct trusty_state *s, struct device *dev) +-{ +- struct smc_ret8 smc_ret; +- +- smc_ret = trusty_smc8(SMC_FC_FFA_RXTX_UNMAP, 0, 0, 0, 0, 0, 0, 0); +- if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) { +- dev_err(s->dev, "%s: SMC_FC_FFA_RXTX_UNMAP failed 0x%lx 0x%lx 0x%lx\n", +- __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2); +- } else { +- kfree(s->ffa_rx); +- kfree(s->ffa_tx); +- } +-} +- + static void trusty_init_version(struct trusty_state *s, struct device *dev) + { + int ret; 
+@@ -842,7 +572,6 @@ static int trusty_probe(struct platform_device *pdev) + spin_lock_init(&s->nop_lock); + INIT_LIST_HEAD(&s->nop_queue); + mutex_init(&s->smc_lock); +- mutex_init(&s->share_memory_msg_lock); + ATOMIC_INIT_NOTIFIER_HEAD(&s->notifier); + init_completion(&s->cpu_idle_completion); + +@@ -862,10 +591,6 @@ static int trusty_probe(struct platform_device *pdev) + if (ret < 0) + goto err_api_version; + +- ret = trusty_init_msg_buf(s, &pdev->dev); +- if (ret < 0) +- goto err_init_msg_buf; +- + s->nop_wq = alloc_workqueue("trusty-nop-wq", WQ_CPU_INTENSIVE, 0); + if (!s->nop_wq) { + ret = -ENODEV; +@@ -910,13 +635,10 @@ static int trusty_probe(struct platform_device *pdev) + err_alloc_works: + destroy_workqueue(s->nop_wq); + err_create_nop_wq: +- trusty_free_msg_buf(s, &pdev->dev); +-err_init_msg_buf: + err_api_version: + s->dev->dma_parms = NULL; + kfree(s->version_str); + device_for_each_child(&pdev->dev, NULL, trusty_remove_child); +- mutex_destroy(&s->share_memory_msg_lock); + mutex_destroy(&s->smc_lock); + kfree(s); + err_allocate_state: +@@ -938,9 +660,7 @@ static int trusty_remove(struct platform_device *pdev) + free_percpu(s->nop_works); + destroy_workqueue(s->nop_wq); + +- mutex_destroy(&s->share_memory_msg_lock); + mutex_destroy(&s->smc_lock); +- trusty_free_msg_buf(s, &pdev->dev); + s->dev->dma_parms = NULL; + kfree(s->version_str); + kfree(s); +diff --git a/include/linux/trusty/arm_ffa.h b/include/linux/trusty/arm_ffa.h +deleted file mode 100644 +index ab7b2afb794c..000000000000 +--- a/include/linux/trusty/arm_ffa.h ++++ /dev/null +@@ -1,590 +0,0 @@ +-/* SPDX-License-Identifier: MIT */ +-/* +- * Copyright (C) 2020 Google, Inc. +- * +- * Trusty and TF-A also have a copy of this header. +- * Please keep the copies in sync. +- */ +-#ifndef __LINUX_TRUSTY_ARM_FFA_H +-#define __LINUX_TRUSTY_ARM_FFA_H +- +-/* +- * Subset of Arm PSA Firmware Framework for Arm v8-A 1.0 EAC 1_0 +- * (https://developer.arm.com/docs/den0077/a) needed for shared memory. 
+- */ +- +-#include "smcall.h" +- +-#ifndef STATIC_ASSERT +-#define STATIC_ASSERT(e) _Static_assert(e, #e) +-#endif +- +-#define FFA_CURRENT_VERSION_MAJOR (1U) +-#define FFA_CURRENT_VERSION_MINOR (0U) +- +-#define FFA_VERSION_TO_MAJOR(version) ((version) >> 16) +-#define FFA_VERSION_TO_MINOR(version) ((version) & (0xffff)) +-#define FFA_VERSION(major, minor) (((major) << 16) | (minor)) +-#define FFA_CURRENT_VERSION \ +- FFA_VERSION(FFA_CURRENT_VERSION_MAJOR, FFA_CURRENT_VERSION_MINOR) +- +-#define SMC_ENTITY_SHARED_MEMORY 4 +- +-#define SMC_FASTCALL_NR_SHARED_MEMORY(nr) \ +- SMC_FASTCALL_NR(SMC_ENTITY_SHARED_MEMORY, nr) +-#define SMC_FASTCALL64_NR_SHARED_MEMORY(nr) \ +- SMC_FASTCALL64_NR(SMC_ENTITY_SHARED_MEMORY, nr) +- +-/** +- * typedef ffa_endpoint_id16_t - Endpoint ID +- * +- * Current implementation only supports VMIDs. FFA spec also support stream +- * endpoint ids. +- */ +-typedef uint16_t ffa_endpoint_id16_t; +- +-/** +- * struct ffa_cons_mrd - Constituent memory region descriptor +- * @address: +- * Start address of contiguous memory region. Must be 4K page aligned. +- * @page_count: +- * Number of 4K pages in region. +- * @reserved_12_15: +- * Reserve bytes 12-15 to pad struct size to 16 bytes. +- */ +-struct ffa_cons_mrd { +- uint64_t address; +- uint32_t page_count; +- uint32_t reserved_12_15; +-}; +-STATIC_ASSERT(sizeof(struct ffa_cons_mrd) == 16); +- +-/** +- * struct ffa_comp_mrd - Composite memory region descriptor +- * @total_page_count: +- * Number of 4k pages in memory region. Must match sum of +- * @address_range_array[].page_count. +- * @address_range_count: +- * Number of entries in @address_range_array. +- * @reserved_8_15: +- * Reserve bytes 8-15 to pad struct size to 16 byte alignment and +- * make @address_range_array 16 byte aligned. +- * @address_range_array: +- * Array of &struct ffa_cons_mrd entries. 
+- */ +-struct ffa_comp_mrd { +- uint32_t total_page_count; +- uint32_t address_range_count; +- uint64_t reserved_8_15; +- struct ffa_cons_mrd address_range_array[]; +-}; +-STATIC_ASSERT(sizeof(struct ffa_comp_mrd) == 16); +- +-/** +- * typedef ffa_mem_attr8_t - Memory region attributes +- * +- * * @FFA_MEM_ATTR_DEVICE_NGNRNE: +- * Device-nGnRnE. +- * * @FFA_MEM_ATTR_DEVICE_NGNRE: +- * Device-nGnRE. +- * * @FFA_MEM_ATTR_DEVICE_NGRE: +- * Device-nGRE. +- * * @FFA_MEM_ATTR_DEVICE_GRE: +- * Device-GRE. +- * * @FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED +- * Normal memory. Non-cacheable. +- * * @FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB +- * Normal memory. Write-back cached. +- * * @FFA_MEM_ATTR_NON_SHAREABLE +- * Non-shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*. +- * * @FFA_MEM_ATTR_OUTER_SHAREABLE +- * Outer Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*. +- * * @FFA_MEM_ATTR_INNER_SHAREABLE +- * Inner Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*. +- */ +-typedef uint8_t ffa_mem_attr8_t; +-#define FFA_MEM_ATTR_DEVICE_NGNRNE ((1U << 4) | (0x0U << 2)) +-#define FFA_MEM_ATTR_DEVICE_NGNRE ((1U << 4) | (0x1U << 2)) +-#define FFA_MEM_ATTR_DEVICE_NGRE ((1U << 4) | (0x2U << 2)) +-#define FFA_MEM_ATTR_DEVICE_GRE ((1U << 4) | (0x3U << 2)) +-#define FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED ((2U << 4) | (0x1U << 2)) +-#define FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB ((2U << 4) | (0x3U << 2)) +-#define FFA_MEM_ATTR_NON_SHAREABLE (0x0U << 0) +-#define FFA_MEM_ATTR_OUTER_SHAREABLE (0x2U << 0) +-#define FFA_MEM_ATTR_INNER_SHAREABLE (0x3U << 0) +- +-/** +- * typedef ffa_mem_perm8_t - Memory access permissions +- * +- * * @FFA_MEM_ATTR_RO +- * Request or specify read-only mapping. +- * * @FFA_MEM_ATTR_RW +- * Request or allow read-write mapping. +- * * @FFA_MEM_PERM_NX +- * Deny executable mapping. +- * * @FFA_MEM_PERM_X +- * Request executable mapping. 
+- */ +-typedef uint8_t ffa_mem_perm8_t; +-#define FFA_MEM_PERM_RO (1U << 0) +-#define FFA_MEM_PERM_RW (1U << 1) +-#define FFA_MEM_PERM_NX (1U << 2) +-#define FFA_MEM_PERM_X (1U << 3) +- +-/** +- * typedef ffa_mem_flag8_t - Endpoint memory flags +- * +- * * @FFA_MEM_FLAG_OTHER +- * Other borrower. Memory region must not be or was not retrieved on behalf +- * of this endpoint. +- */ +-typedef uint8_t ffa_mem_flag8_t; +-#define FFA_MEM_FLAG_OTHER (1U << 0) +- +-/** +- * typedef ffa_mtd_flag32_t - Memory transaction descriptor flags +- * +- * * @FFA_MTD_FLAG_ZERO_MEMORY +- * Zero memory after unmapping from sender (must be 0 for share). +- * * @FFA_MTD_FLAG_TIME_SLICING +- * Not supported by this implementation. +- * * @FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH +- * Zero memory after unmapping from borrowers (must be 0 for share). +- * * @FFA_MTD_FLAG_TYPE_MASK +- * Bit-mask to extract memory management transaction type from flags. +- * * @FFA_MTD_FLAG_TYPE_SHARE_MEMORY +- * Share memory transaction flag. +- * Used by @SMC_FC_FFA_MEM_RETRIEVE_RESP to indicate that memory came from +- * @SMC_FC_FFA_MEM_SHARE and by @SMC_FC_FFA_MEM_RETRIEVE_REQ to specify that +- * it must have. +- * * @FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK +- * Not supported by this implementation. +- */ +-typedef uint32_t ffa_mtd_flag32_t; +-#define FFA_MTD_FLAG_ZERO_MEMORY (1U << 0) +-#define FFA_MTD_FLAG_TIME_SLICING (1U << 1) +-#define FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH (1U << 2) +-#define FFA_MTD_FLAG_TYPE_MASK (3U << 3) +-#define FFA_MTD_FLAG_TYPE_SHARE_MEMORY (1U << 3) +-#define FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK (0x1FU << 5) +- +-/** +- * struct ffa_mapd - Memory access permissions descriptor +- * @endpoint_id: +- * Endpoint id that @memory_access_permissions and @flags apply to. +- * (&typedef ffa_endpoint_id16_t). +- * @memory_access_permissions: +- * FFA_MEM_PERM_* values or'ed together (&typedef ffa_mem_perm8_t). 
+- * @flags: +- * FFA_MEM_FLAG_* values or'ed together (&typedef ffa_mem_flag8_t). +- */ +-struct ffa_mapd { +- ffa_endpoint_id16_t endpoint_id; +- ffa_mem_perm8_t memory_access_permissions; +- ffa_mem_flag8_t flags; +-}; +-STATIC_ASSERT(sizeof(struct ffa_mapd) == 4); +- +-/** +- * struct ffa_emad - Endpoint memory access descriptor. +- * @mapd: &struct ffa_mapd. +- * @comp_mrd_offset: +- * Offset of &struct ffa_comp_mrd form start of &struct ffa_mtd. +- * @reserved_8_15: +- * Reserved bytes 8-15. Must be 0. +- */ +-struct ffa_emad { +- struct ffa_mapd mapd; +- uint32_t comp_mrd_offset; +- uint64_t reserved_8_15; +-}; +-STATIC_ASSERT(sizeof(struct ffa_emad) == 16); +- +-/** +- * struct ffa_mtd - Memory transaction descriptor. +- * @sender_id: +- * Sender endpoint id. +- * @memory_region_attributes: +- * FFA_MEM_ATTR_* values or'ed together (&typedef ffa_mem_attr8_t). +- * @reserved_3: +- * Reserved bytes 3. Must be 0. +- * @flags: +- * FFA_MTD_FLAG_* values or'ed together (&typedef ffa_mtd_flag32_t). +- * @handle: +- * Id of shared memory object. Most be 0 for MEM_SHARE. +- * @tag: Client allocated tag. Must match original value. +- * @reserved_24_27: +- * Reserved bytes 24-27. Must be 0. +- * @emad_count: +- * Number of entries in @emad. Must be 1 in current implementation. +- * FFA spec allows more entries. +- * @emad: +- * Endpoint memory access descriptor array (see @struct ffa_emad). +- */ +-struct ffa_mtd { +- ffa_endpoint_id16_t sender_id; +- ffa_mem_attr8_t memory_region_attributes; +- uint8_t reserved_3; +- ffa_mtd_flag32_t flags; +- uint64_t handle; +- uint64_t tag; +- uint32_t reserved_24_27; +- uint32_t emad_count; +- struct ffa_emad emad[]; +-}; +-STATIC_ASSERT(sizeof(struct ffa_mtd) == 32); +- +-/** +- * struct ffa_mem_relinquish_descriptor - Relinquish request descriptor. +- * @handle: +- * Id of shared memory object to relinquish. +- * @flags: +- * If bit 0 is set clear memory after unmapping from borrower. Must be 0 +- * for share. 
Bit[1]: Time slicing. Not supported, must be 0. All other +- * bits are reserved 0. +- * @endpoint_count: +- * Number of entries in @endpoint_array. +- * @endpoint_array: +- * Array of endpoint ids. +- */ +-struct ffa_mem_relinquish_descriptor { +- uint64_t handle; +- uint32_t flags; +- uint32_t endpoint_count; +- ffa_endpoint_id16_t endpoint_array[]; +-}; +-STATIC_ASSERT(sizeof(struct ffa_mem_relinquish_descriptor) == 16); +- +-/** +- * enum ffa_error - FF-A error code +- * @FFA_ERROR_NOT_SUPPORTED: +- * Operation contained possibly valid parameters not supported by the +- * current implementation. Does not match FF-A 1.0 EAC 1_0 definition. +- * @FFA_ERROR_INVALID_PARAMETERS: +- * Invalid parameters. Conditions function specific. +- * @FFA_ERROR_NO_MEMORY: +- * Not enough memory. +- * @FFA_ERROR_DENIED: +- * Operation not allowed. Conditions function specific. +- * +- * FF-A 1.0 EAC 1_0 defines other error codes as well but the current +- * implementation does not use them. +- */ +-enum ffa_error { +- FFA_ERROR_NOT_SUPPORTED = -1, +- FFA_ERROR_INVALID_PARAMETERS = -2, +- FFA_ERROR_NO_MEMORY = -3, +- FFA_ERROR_DENIED = -6, +-}; +- +-/** +- * SMC_FC32_FFA_MIN - First 32 bit SMC opcode reserved for FFA +- */ +-#define SMC_FC32_FFA_MIN SMC_FASTCALL_NR_SHARED_MEMORY(0x60) +- +-/** +- * SMC_FC32_FFA_MAX - Last 32 bit SMC opcode reserved for FFA +- */ +-#define SMC_FC32_FFA_MAX SMC_FASTCALL_NR_SHARED_MEMORY(0x7F) +- +-/** +- * SMC_FC64_FFA_MIN - First 64 bit SMC opcode reserved for FFA +- */ +-#define SMC_FC64_FFA_MIN SMC_FASTCALL64_NR_SHARED_MEMORY(0x60) +- +-/** +- * SMC_FC64_FFA_MAX - Last 64 bit SMC opcode reserved for FFA +- */ +-#define SMC_FC64_FFA_MAX SMC_FASTCALL64_NR_SHARED_MEMORY(0x7F) +- +-/** +- * SMC_FC_FFA_ERROR - SMC error return opcode +- * +- * Register arguments: +- * +- * * w1: VMID in [31:16], vCPU in [15:0] +- * * w2: Error code (&enum ffa_error) +- */ +-#define SMC_FC_FFA_ERROR SMC_FASTCALL_NR_SHARED_MEMORY(0x60) +- +-/** +- * SMC_FC_FFA_SUCCESS - 
32 bit SMC success return opcode +- * +- * Register arguments: +- * +- * * w1: VMID in [31:16], vCPU in [15:0] +- * * w2-w7: Function specific +- */ +-#define SMC_FC_FFA_SUCCESS SMC_FASTCALL_NR_SHARED_MEMORY(0x61) +- +-/** +- * SMC_FC64_FFA_SUCCESS - 64 bit SMC success return opcode +- * +- * Register arguments: +- * +- * * w1: VMID in [31:16], vCPU in [15:0] +- * * w2/x2-w7/x7: Function specific +- */ +-#define SMC_FC64_FFA_SUCCESS SMC_FASTCALL64_NR_SHARED_MEMORY(0x61) +- +-/** +- * SMC_FC_FFA_VERSION - SMC opcode to return supported FF-A version +- * +- * Register arguments: +- * +- * * w1: Major version bit[30:16] and minor version in bit[15:0] supported +- * by caller. Bit[31] must be 0. +- * +- * Return: +- * * w0: &SMC_FC_FFA_SUCCESS +- * * w2: Major version bit[30:16], minor version in bit[15:0], bit[31] must +- * be 0. +- * +- * or +- * +- * * w0: SMC_FC_FFA_ERROR +- * * w2: FFA_ERROR_NOT_SUPPORTED if major version passed in is less than the +- * minimum major version supported. +- */ +-#define SMC_FC_FFA_VERSION SMC_FASTCALL_NR_SHARED_MEMORY(0x63) +- +-/** +- * SMC_FC_FFA_FEATURES - SMC opcode to check optional feature support +- * +- * Register arguments: +- * +- * * w1: FF-A function ID +- * +- * Return: +- * * w0: &SMC_FC_FFA_SUCCESS +- * * w2: Bit[0]: Supports custom buffers for memory transactions. +- * Bit[1:0]: For RXTX_MAP min buffer size and alignment boundary. +- * Other bits must be 0. +- * * w3: For FFA_MEM_RETRIEVE_REQ, bit[7-0]: Number of times receiver can +- * retrieve each memory region before relinquishing it specified as +- * ((1U << (value + 1)) - 1 (or value = bits in reference count - 1). +- * For all other bits and commands: must be 0. +- * or +- * +- * * w0: SMC_FC_FFA_ERROR +- * * w2: FFA_ERROR_NOT_SUPPORTED if function is not implemented, or +- * FFA_ERROR_INVALID_PARAMETERS if function id is not valid. 
+- */ +-#define SMC_FC_FFA_FEATURES SMC_FASTCALL_NR_SHARED_MEMORY(0x64) +- +-/** +- * SMC_FC_FFA_RXTX_MAP - 32 bit SMC opcode to map message buffers +- * +- * Register arguments: +- * +- * * w1: TX address +- * * w2: RX address +- * * w3: RX/TX page count in bit[5:0] +- * +- * Return: +- * * w0: &SMC_FC_FFA_SUCCESS +- */ +-#define SMC_FC_FFA_RXTX_MAP SMC_FASTCALL_NR_SHARED_MEMORY(0x66) +- +-/** +- * SMC_FC64_FFA_RXTX_MAP - 64 bit SMC opcode to map message buffers +- * +- * Register arguments: +- * +- * * x1: TX address +- * * x2: RX address +- * * x3: RX/TX page count in bit[5:0] +- * +- * Return: +- * * w0: &SMC_FC_FFA_SUCCESS +- */ +-#define SMC_FC64_FFA_RXTX_MAP SMC_FASTCALL64_NR_SHARED_MEMORY(0x66) +-#ifdef CONFIG_64BIT +-#define SMC_FCZ_FFA_RXTX_MAP SMC_FC64_FFA_RXTX_MAP +-#else +-#define SMC_FCZ_FFA_RXTX_MAP SMC_FC_FFA_RXTX_MAP +-#endif +- +-/** +- * SMC_FC_FFA_RXTX_UNMAP - SMC opcode to unmap message buffers +- * +- * Register arguments: +- * +- * * w1: ID in [31:16] +- * +- * Return: +- * * w0: &SMC_FC_FFA_SUCCESS +- */ +-#define SMC_FC_FFA_RXTX_UNMAP SMC_FASTCALL_NR_SHARED_MEMORY(0x67) +- +-/** +- * SMC_FC_FFA_ID_GET - SMC opcode to get endpoint id of caller +- * +- * Return: +- * * w0: &SMC_FC_FFA_SUCCESS +- * * w2: ID in bit[15:0], bit[31:16] must be 0. +- */ +-#define SMC_FC_FFA_ID_GET SMC_FASTCALL_NR_SHARED_MEMORY(0x69) +- +-/** +- * SMC_FC_FFA_MEM_DONATE - 32 bit SMC opcode to donate memory +- * +- * Not supported. +- */ +-#define SMC_FC_FFA_MEM_DONATE SMC_FASTCALL_NR_SHARED_MEMORY(0x71) +- +-/** +- * SMC_FC_FFA_MEM_LEND - 32 bit SMC opcode to lend memory +- * +- * Not currently supported. 
+- */ +-#define SMC_FC_FFA_MEM_LEND SMC_FASTCALL_NR_SHARED_MEMORY(0x72) +- +-/** +- * SMC_FC_FFA_MEM_SHARE - 32 bit SMC opcode to share memory +- * +- * Register arguments: +- * +- * * w1: Total length +- * * w2: Fragment length +- * * w3: Address +- * * w4: Page count +- * +- * Return: +- * * w0: &SMC_FC_FFA_SUCCESS +- * * w2/w3: Handle +- * +- * or +- * +- * * w0: &SMC_FC_FFA_MEM_FRAG_RX +- * * w1-: See &SMC_FC_FFA_MEM_FRAG_RX +- * +- * or +- * +- * * w0: SMC_FC_FFA_ERROR +- * * w2: Error code (&enum ffa_error) +- */ +-#define SMC_FC_FFA_MEM_SHARE SMC_FASTCALL_NR_SHARED_MEMORY(0x73) +- +-/** +- * SMC_FC64_FFA_MEM_SHARE - 64 bit SMC opcode to share memory +- * +- * Register arguments: +- * +- * * w1: Total length +- * * w2: Fragment length +- * * x3: Address +- * * w4: Page count +- * +- * Return: +- * * w0: &SMC_FC_FFA_SUCCESS +- * * w2/w3: Handle +- * +- * or +- * +- * * w0: &SMC_FC_FFA_MEM_FRAG_RX +- * * w1-: See &SMC_FC_FFA_MEM_FRAG_RX +- * +- * or +- * +- * * w0: SMC_FC_FFA_ERROR +- * * w2: Error code (&enum ffa_error) +- */ +-#define SMC_FC64_FFA_MEM_SHARE SMC_FASTCALL64_NR_SHARED_MEMORY(0x73) +- +-/** +- * SMC_FC_FFA_MEM_RETRIEVE_REQ - 32 bit SMC opcode to retrieve shared memory +- * +- * Register arguments: +- * +- * * w1: Total length +- * * w2: Fragment length +- * * w3: Address +- * * w4: Page count +- * +- * Return: +- * * w0: &SMC_FC_FFA_MEM_RETRIEVE_RESP +- * * w1/x1-w5/x5: See &SMC_FC_FFA_MEM_RETRIEVE_RESP +- */ +-#define SMC_FC_FFA_MEM_RETRIEVE_REQ SMC_FASTCALL_NR_SHARED_MEMORY(0x74) +- +-/** +- * SMC_FC64_FFA_MEM_RETRIEVE_REQ - 64 bit SMC opcode to retrieve shared memory +- * +- * Register arguments: +- * +- * * w1: Total length +- * * w2: Fragment length +- * * x3: Address +- * * w4: Page count +- * +- * Return: +- * * w0: &SMC_FC_FFA_MEM_RETRIEVE_RESP +- * * w1/x1-w5/x5: See &SMC_FC_FFA_MEM_RETRIEVE_RESP +- */ +-#define SMC_FC64_FFA_MEM_RETRIEVE_REQ SMC_FASTCALL64_NR_SHARED_MEMORY(0x74) +- +-/** +- * SMC_FC_FFA_MEM_RETRIEVE_RESP - Retrieve 32 
bit SMC return opcode +- * +- * Register arguments: +- * +- * * w1: Total length +- * * w2: Fragment length +- */ +-#define SMC_FC_FFA_MEM_RETRIEVE_RESP SMC_FASTCALL_NR_SHARED_MEMORY(0x75) +- +-/** +- * SMC_FC_FFA_MEM_RELINQUISH - SMC opcode to relinquish shared memory +- * +- * Input in &struct ffa_mem_relinquish_descriptor format in message buffer. +- * +- * Return: +- * * w0: &SMC_FC_FFA_SUCCESS +- */ +-#define SMC_FC_FFA_MEM_RELINQUISH SMC_FASTCALL_NR_SHARED_MEMORY(0x76) +- +-/** +- * SMC_FC_FFA_MEM_RECLAIM - SMC opcode to reclaim shared memory +- * +- * Register arguments: +- * +- * * w1/w2: Handle +- * * w3: Flags +- * +- * Return: +- * * w0: &SMC_FC_FFA_SUCCESS +- */ +-#define SMC_FC_FFA_MEM_RECLAIM SMC_FASTCALL_NR_SHARED_MEMORY(0x77) +- +-/** +- * SMC_FC_FFA_MEM_FRAG_RX - SMC opcode to request next fragment. +- * +- * Register arguments: +- * +- * * w1/w2: Cookie +- * * w3: Fragment offset. +- * * w4: Endpoint id ID in [31:16], if client is hypervisor. +- * +- * Return: +- * * w0: &SMC_FC_FFA_MEM_FRAG_TX +- * * w1/x1-w5/x5: See &SMC_FC_FFA_MEM_FRAG_TX +- */ +-#define SMC_FC_FFA_MEM_FRAG_RX SMC_FASTCALL_NR_SHARED_MEMORY(0x7A) +- +-/** +- * SMC_FC_FFA_MEM_FRAG_TX - SMC opcode to transmit next fragment +- * +- * Register arguments: +- * +- * * w1/w2: Cookie +- * * w3: Fragment length. +- * * w4: Sender endpoint id ID in [31:16], if client is hypervisor. +- * +- * Return: +- * * w0: &SMC_FC_FFA_MEM_FRAG_RX or &SMC_FC_FFA_SUCCESS. +- * * w1/x1-w5/x5: See opcode in w0. 
+- */ +-#define SMC_FC_FFA_MEM_FRAG_TX SMC_FASTCALL_NR_SHARED_MEMORY(0x7B) +- +-#endif /* __LINUX_TRUSTY_ARM_FFA_H */ +diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h +index efbb36999a8b..272d96c1c696 100644 +--- a/include/linux/trusty/trusty.h ++++ b/include/linux/trusty/trusty.h +@@ -52,9 +52,6 @@ u32 trusty_get_api_version(struct device *dev); + bool trusty_get_panic_status(struct device *dev); + + struct ns_mem_page_info { +- u64 paddr; +- u8 ffa_mem_attr; +- u8 ffa_mem_perm; + u64 compat_attr; + }; + +-- +2.30.2 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0037-ANDROID-trusty-Rename-transfer-memory-function-to-le.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0037-ANDROID-trusty-Rename-transfer-memory-function-to-le.patch new file mode 100644 index 0000000000..a7bc06004e --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0037-ANDROID-trusty-Rename-transfer-memory-function-to-le.patch @@ -0,0 +1,191 @@ +From 804ef860d9757cbe31b606fd5ec68cc5454c88f8 Mon Sep 17 00:00:00 2001 +From: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Date: Tue, 18 Jan 2022 18:27:09 +0000 +Subject: [PATCH 24/32] ANDROID: trusty: Rename transfer memory function to + lend memory + +Renaming trusty_transfer_memory to trusty_lend_memory allows Trusty +to export memory operation like share, lend, reclaim and use common +code for memory share and lend operations. + +Define TRUSTY_DEFAULT_MEM_OBJ_TAG as 0 and use that in existing calls. 
+ +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Change-Id: Ie165a609cc4398bb916967595d0b748d54d75faf +Upstream-Status: Pending [Not submitted to upstream yet] +--- + drivers/trusty/trusty-ipc.c | 14 ++++++++---- + drivers/trusty/trusty-test.c | 3 ++- + drivers/trusty/trusty-virtio.c | 3 ++- + drivers/trusty/trusty.c | 41 ++++++++++++++++++++++------------ + include/linux/trusty/trusty.h | 11 ++++----- + 5 files changed, 47 insertions(+), 25 deletions(-) + +diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c +index 82d6ddeb41f4..0a27af2063a7 100644 +--- a/drivers/trusty/trusty-ipc.c ++++ b/drivers/trusty/trusty-ipc.c +@@ -1233,10 +1233,16 @@ static int dn_share_fd(struct tipc_dn_chan *dn, int fd, + goto cleanup_handle; + } + +- ret = trusty_transfer_memory(tipc_shared_handle_dev(shared_handle), +- &mem_id, shared_handle->sgt->sgl, +- shared_handle->sgt->orig_nents, prot, tag, +- lend); ++ if (lend) ++ ret = trusty_lend_memory(tipc_shared_handle_dev(shared_handle), ++ &mem_id, shared_handle->sgt->sgl, ++ shared_handle->sgt->orig_nents, prot, ++ tag); ++ else ++ ret = trusty_share_memory(tipc_shared_handle_dev(shared_handle), ++ &mem_id, shared_handle->sgt->sgl, ++ shared_handle->sgt->orig_nents, prot, ++ tag); + + if (ret < 0) { + dev_dbg(dev, "Transferring memory failed: %d\n", ret); +diff --git a/drivers/trusty/trusty-test.c b/drivers/trusty/trusty-test.c +index 844868981fa5..c25fc0f2fcf0 100644 +--- a/drivers/trusty/trusty-test.c ++++ b/drivers/trusty/trusty-test.c +@@ -138,7 +138,8 @@ static int trusty_test_share_objs(struct trusty_test_state *s, + t1 = ktime_get(); + tmpret = trusty_share_memory(s->trusty_dev, &obj->mem_id, + obj->sgt.sgl, obj->sgt.nents, +- PAGE_KERNEL); ++ PAGE_KERNEL, ++ TRUSTY_DEFAULT_MEM_OBJ_TAG); + t2 = ktime_get(); + if (tmpret) { + ret = tmpret; +diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c +index fea59cd2e218..365e7c04bcf4 100644 +--- 
a/drivers/trusty/trusty-virtio.c ++++ b/drivers/trusty/trusty-virtio.c +@@ -626,7 +626,8 @@ static int trusty_virtio_add_devices(struct trusty_ctx *tctx) + + sg_init_one(&tctx->shared_sg, descr_va, descr_buf_sz); + ret = trusty_share_memory(tctx->dev->parent, &descr_id, +- &tctx->shared_sg, 1, PAGE_KERNEL); ++ &tctx->shared_sg, 1, PAGE_KERNEL, ++ TRUSTY_DEFAULT_MEM_OBJ_TAG); + if (ret) { + dev_err(tctx->dev, "trusty_share_memory failed: %d\n", ret); + goto err_share_memory; +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 2dec75398f69..6bd30bc1bbc9 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -222,18 +222,9 @@ s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) + } + EXPORT_SYMBOL(trusty_std_call32); + +-int trusty_share_memory(struct device *dev, u64 *id, +- struct scatterlist *sglist, unsigned int nents, +- pgprot_t pgprot) +-{ +- return trusty_transfer_memory(dev, id, sglist, nents, pgprot, 0, +- false); +-} +-EXPORT_SYMBOL(trusty_share_memory); +- +-int trusty_transfer_memory(struct device *dev, u64 *id, +- struct scatterlist *sglist, unsigned int nents, +- pgprot_t pgprot, u64 tag, bool lend) ++static int __trusty_share_memory(struct device *dev, u64 *id, ++ struct scatterlist *sglist, unsigned int nents, ++ pgprot_t pgprot, u64 tag, bool mem_share) + { + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + int ret; +@@ -253,6 +244,12 @@ int trusty_transfer_memory(struct device *dev, u64 *id, + return -EOPNOTSUPP; + } + ++ if (mem_share == false && s->api_version < TRUSTY_API_VERSION_MEM_OBJ) { ++ dev_err(s->dev, "%s: old trusty version does not support lending memory objects\n", ++ __func__); ++ return -EOPNOTSUPP; ++ } ++ + count = dma_map_sg(dev, sglist, nents, DMA_BIDIRECTIONAL); + if (count != nents) { + dev_err(s->dev, "failed to dma map sg_table\n"); +@@ -271,7 +268,22 @@ int trusty_transfer_memory(struct device *dev, u64 *id, + *id = pg_inf.compat_attr; + 
return 0; + } +-EXPORT_SYMBOL(trusty_transfer_memory); ++ ++int trusty_share_memory(struct device *dev, u64 *id, ++ struct scatterlist *sglist, unsigned int nents, ++ pgprot_t pgprot, u64 tag) ++{ ++ return __trusty_share_memory(dev, id, sglist, nents, pgprot, tag, true); ++} ++EXPORT_SYMBOL(trusty_share_memory); ++ ++int trusty_lend_memory(struct device *dev, u64 *id, ++ struct scatterlist *sglist, unsigned int nents, ++ pgprot_t pgprot, u64 tag) ++{ ++ return __trusty_share_memory(dev, id, sglist, nents, pgprot, tag, false); ++} ++EXPORT_SYMBOL(trusty_lend_memory); + + /* + * trusty_share_memory_compat - trusty_share_memory wrapper for old apis +@@ -287,7 +299,8 @@ int trusty_share_memory_compat(struct device *dev, u64 *id, + int ret; + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + +- ret = trusty_share_memory(dev, id, sglist, nents, pgprot); ++ ret = trusty_share_memory(dev, id, sglist, nents, pgprot, ++ TRUSTY_DEFAULT_MEM_OBJ_TAG); + if (!ret && s->api_version < TRUSTY_API_VERSION_PHYS_MEM_OBJ) + *id &= 0x0000FFFFFFFFF000ull; + +diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h +index 272d96c1c696..27f635f2d12d 100644 +--- a/include/linux/trusty/trusty.h ++++ b/include/linux/trusty/trusty.h +@@ -11,6 +11,7 @@ + #include <linux/device.h> + #include <linux/pagemap.h> + ++#define TRUSTY_DEFAULT_MEM_OBJ_TAG (0) + + #if IS_ENABLED(CONFIG_TRUSTY) + s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2); +@@ -62,13 +63,13 @@ struct scatterlist; + typedef u64 trusty_shared_mem_id_t; + int trusty_share_memory(struct device *dev, trusty_shared_mem_id_t *id, + struct scatterlist *sglist, unsigned int nents, +- pgprot_t pgprot); ++ pgprot_t pgprot, u64 tag); + int trusty_share_memory_compat(struct device *dev, trusty_shared_mem_id_t *id, + struct scatterlist *sglist, unsigned int nents, + pgprot_t pgprot); +-int trusty_transfer_memory(struct device *dev, u64 *id, +- struct scatterlist *sglist, 
unsigned int nents, +- pgprot_t pgprot, u64 tag, bool lend); ++int trusty_lend_memory(struct device *dev, u64 *id, ++ struct scatterlist *sglist, unsigned int nents, ++ pgprot_t pgprot, u64 tag); + int trusty_reclaim_memory(struct device *dev, trusty_shared_mem_id_t id, + struct scatterlist *sglist, unsigned int nents); + +@@ -78,7 +79,7 @@ u64 trusty_dma_buf_get_ffa_tag(struct dma_buf *dma_buf); + #else + static inline u64 trusty_dma_buf_get_ffa_tag(struct dma_buf *dma_buf) + { +- return 0; ++ return TRUSTY_DEFAULT_MEM_OBJ_TAG; + } + #endif + +-- +2.30.2 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0038-ANDROID-trusty-Separate-out-SMC-based-transport.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0038-ANDROID-trusty-Separate-out-SMC-based-transport.patch new file mode 100644 index 0000000000..c4ff31c07e --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0038-ANDROID-trusty-Separate-out-SMC-based-transport.patch @@ -0,0 +1,495 @@ +From 844cdefb8b0f6b1f75cf4cbaa2d9260959a26e02 Mon Sep 17 00:00:00 2001 +From: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Date: Fri, 14 Jan 2022 14:02:39 +0000 +Subject: [PATCH 25/32] ANDROID: trusty: Separate out SMC based transport + +This commit refactors SMC based transport operation like +smc_fastcalls, smc memory operations in a separate file. 
+ +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Change-Id: Iebee505b7172f6247186e3bf1e0b50740b2e4dfa +Upstream-Status: Pending [Not submitted to upstream yet] +--- + drivers/trusty/Makefile | 1 + + drivers/trusty/trusty-private.h | 61 ++++++++++++++ + drivers/trusty/trusty-smc.c | 136 ++++++++++++++++++++++++++++++ + drivers/trusty/trusty.c | 144 +++++++++----------------------- + 4 files changed, 237 insertions(+), 105 deletions(-) + create mode 100644 drivers/trusty/trusty-private.h + create mode 100644 drivers/trusty/trusty-smc.c + +diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile +index 2cf1cfccf97b..fbb53ee93003 100644 +--- a/drivers/trusty/Makefile ++++ b/drivers/trusty/Makefile +@@ -5,6 +5,7 @@ + + obj-$(CONFIG_TRUSTY) += trusty-core.o + trusty-core-objs += trusty.o trusty-mem.o ++trusty-core-objs += trusty-smc.o + trusty-core-$(CONFIG_ARM) += trusty-smc-arm.o + trusty-core-$(CONFIG_ARM64) += trusty-smc-arm64.o + obj-$(CONFIG_TRUSTY_IRQ) += trusty-irq.o +diff --git a/drivers/trusty/trusty-private.h b/drivers/trusty/trusty-private.h +new file mode 100644 +index 000000000000..4d73c6ae35d4 +--- /dev/null ++++ b/drivers/trusty/trusty-private.h +@@ -0,0 +1,61 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright (C) 2022 ARM Ltd. 
++ */ ++ ++#ifndef _TRUSTY_PRIVATE_H ++#define _TRUSTY_PRIVATE_H ++ ++#include <linux/types.h> ++ ++struct trusty_work { ++ struct trusty_state *ts; ++ struct work_struct work; ++}; ++ ++struct trusty_msg_ops { ++ u32 (*send_direct_msg)(struct device *dev, unsigned long fid, ++ unsigned long a0, unsigned long a1, ++ unsigned long a2); ++}; ++ ++struct trusty_mem_ops { ++ int (*trusty_share_memory)(struct device *dev, u64 *id, ++ struct scatterlist *sglist, ++ unsigned int nents, pgprot_t pgprot, u64 tag); ++ int (*trusty_lend_memory)(struct device *dev, u64 *id, ++ struct scatterlist *sglist, ++ unsigned int nents, pgprot_t pgprot, u64 tag); ++ int (*trusty_reclaim_memory)(struct device *dev, u64 id, ++ struct scatterlist *sglist, ++ unsigned int nents); ++}; ++ ++struct trusty_state { ++ struct mutex smc_lock; ++ struct atomic_notifier_head notifier; ++ struct completion cpu_idle_completion; ++ char *version_str; ++ u32 api_version; ++ bool trusty_panicked; ++ struct device *dev; ++ struct workqueue_struct *nop_wq; ++ struct trusty_work __percpu *nop_works; ++ struct list_head nop_queue; ++ spinlock_t nop_lock; /* protects nop_queue */ ++ struct device_dma_parameters dma_parms; ++ const struct trusty_msg_ops *msg_ops; ++ const struct trusty_mem_ops *mem_ops; ++}; ++ ++int trusty_init_api_version(struct trusty_state *s, struct device *dev, ++ u32 (*send_direct_msg)(struct device *dev, ++ unsigned long fid, ++ unsigned long a0, ++ unsigned long a1, ++ unsigned long a2)); ++ ++int trusty_smc_transport_setup(struct device *dev); ++void trusty_smc_transport_cleanup(struct device *dev); ++ ++#endif /* _TRUSTY_PRIVATE_H */ +diff --git a/drivers/trusty/trusty-smc.c b/drivers/trusty/trusty-smc.c +new file mode 100644 +index 000000000000..8fa841e0e253 +--- /dev/null ++++ b/drivers/trusty/trusty-smc.c +@@ -0,0 +1,136 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2013 Google, Inc. 
++ */ ++ ++#include <linux/platform_device.h> ++#include <linux/trusty/smcall.h> ++#include <linux/trusty/trusty.h> ++ ++#include <linux/scatterlist.h> ++#include <linux/dma-mapping.h> ++ ++#include "trusty-smc.h" ++#include "trusty-private.h" ++ ++static u32 trusty_smc_send_direct_msg(struct device *dev, unsigned long fid, ++ unsigned long a0, unsigned long a1, ++ unsigned long a2) ++{ ++ return trusty_smc8(fid, a0, a1, a2, 0, 0, 0, 0).r0; ++} ++ ++static int trusty_smc_share_memory(struct device *dev, u64 *id, ++ struct scatterlist *sglist, ++ unsigned int nents, pgprot_t pgprot, u64 tag) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ int ret; ++ struct ns_mem_page_info pg_inf; ++ struct scatterlist *sg; ++ size_t count; ++ ++ if (WARN_ON(nents < 1)) ++ return -EINVAL; ++ ++ if (nents != 1) { ++ dev_err(s->dev, "%s: old trusty version does not support " ++ "non-contiguous memory objects\n", __func__); ++ return -EOPNOTSUPP; ++ } ++ ++ count = dma_map_sg(dev, sglist, nents, DMA_BIDIRECTIONAL); ++ if (count != nents) { ++ dev_err(s->dev, "failed to dma map sg_table\n"); ++ return -EINVAL; ++ } ++ ++ sg = sglist; ++ ret = trusty_encode_page_info(&pg_inf, phys_to_page(sg_dma_address(sg)), ++ pgprot); ++ if (ret) { ++ dev_err(s->dev, "%s: trusty_encode_page_info failed\n", ++ __func__); ++ dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL); ++ return ret; ++ } ++ ++ *id = pg_inf.compat_attr; ++ return 0; ++} ++ ++static int trusty_smc_lend_memory(struct device *dev, u64 *id, ++ struct scatterlist *sglist, ++ unsigned int nents, pgprot_t pgprot, u64 tag) ++{ ++ return -EOPNOTSUPP; ++} ++ ++static int trusty_smc_reclaim_memory(struct device *dev, u64 id, ++ struct scatterlist *sglist, ++ unsigned int nents) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ if (WARN_ON(nents < 1)) ++ return -EINVAL; ++ ++ if (WARN_ON(s->api_version >= TRUSTY_API_VERSION_MEM_OBJ)) ++ return -EINVAL; ++ ++ if (nents != 
1) { ++ dev_err(s->dev, "%s: not supported\n", __func__); ++ return -EOPNOTSUPP; ++ } ++ ++ dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL); ++ ++ dev_dbg(s->dev, "%s: done\n", __func__); ++ return 0; ++} ++ ++static const struct trusty_msg_ops trusty_smc_msg_ops = { ++ .send_direct_msg = &trusty_smc_send_direct_msg, ++}; ++ ++static const struct trusty_mem_ops trusty_smc_mem_ops = { ++ .trusty_share_memory = &trusty_smc_share_memory, ++ .trusty_lend_memory = &trusty_smc_lend_memory, ++ .trusty_reclaim_memory = &trusty_smc_reclaim_memory, ++}; ++ ++int trusty_smc_transport_setup(struct device *dev) ++{ ++ int rc; ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ rc = trusty_init_api_version(s, dev, &trusty_smc_send_direct_msg); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ /* ++ * Initialize Trusty msg calls with Trusty SMC ABI ++ */ ++ s->msg_ops = &trusty_smc_msg_ops; ++ ++ /* ++ * Initialize Trusty memory operations with Trusty SMC ABI only when ++ * Trusty API version is below TRUSTY_API_VERSION_MEM_OBJ. 
++ */ ++ if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ) ++ s->mem_ops = &trusty_smc_mem_ops; ++ ++ return 0; ++} ++ ++void trusty_smc_transport_cleanup(struct device *dev) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ if (s->msg_ops == &trusty_smc_msg_ops) ++ s->msg_ops = NULL; ++ ++ if (s->mem_ops == &trusty_smc_mem_ops) ++ s->mem_ops = NULL; ++} +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 6bd30bc1bbc9..0486827a45ca 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -18,37 +18,10 @@ + #include <linux/scatterlist.h> + #include <linux/dma-mapping.h> + +-#include "trusty-smc.h" ++#include "trusty-private.h" + +-struct trusty_state; + static struct platform_driver trusty_driver; + +-struct trusty_work { +- struct trusty_state *ts; +- struct work_struct work; +-}; +- +-struct trusty_state { +- struct mutex smc_lock; +- struct atomic_notifier_head notifier; +- struct completion cpu_idle_completion; +- char *version_str; +- u32 api_version; +- bool trusty_panicked; +- struct device *dev; +- struct workqueue_struct *nop_wq; +- struct trusty_work __percpu *nop_works; +- struct list_head nop_queue; +- spinlock_t nop_lock; /* protects nop_queue */ +- struct device_dma_parameters dma_parms; +-}; +- +-static inline unsigned long smc(unsigned long r0, unsigned long r1, +- unsigned long r2, unsigned long r3) +-{ +- return trusty_smc8(r0, r1, r2, r3, 0, 0, 0, 0).r0; +-} +- + s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) + { + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); +@@ -60,7 +33,7 @@ s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) + if (WARN_ON(SMC_IS_SMC64(smcnr))) + return SM_ERR_INVALID_PARAMETERS; + +- return smc(smcnr, a0, a1, a2); ++ return s->msg_ops->send_direct_msg(dev, smcnr, a0, a1, a2); + } + EXPORT_SYMBOL(trusty_fast_call32); + +@@ -76,7 +49,7 @@ s64 trusty_fast_call64(struct device 
*dev, u64 smcnr, u64 a0, u64 a1, u64 a2) + if (WARN_ON(!SMC_IS_SMC64(smcnr))) + return SM_ERR_INVALID_PARAMETERS; + +- return smc(smcnr, a0, a1, a2); ++ return s->msg_ops->send_direct_msg(dev, smcnr, a0, a1, a2); + } + EXPORT_SYMBOL(trusty_fast_call64); + #endif +@@ -88,13 +61,16 @@ static unsigned long trusty_std_call_inner(struct device *dev, + { + unsigned long ret; + int retry = 5; ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx)\n", + __func__, smcnr, a0, a1, a2); + while (true) { +- ret = smc(smcnr, a0, a1, a2); ++ ret = s->msg_ops->send_direct_msg(dev, smcnr, a0, a1, a2); + while ((s32)ret == SM_ERR_FIQ_INTERRUPTED) +- ret = smc(SMC_SC_RESTART_FIQ, 0, 0, 0); ++ ret = s->msg_ops->send_direct_msg(dev, ++ SMC_SC_RESTART_FIQ, ++ 0, 0, 0); + if ((int)ret != SM_ERR_BUSY || !retry) + break; + +@@ -222,58 +198,17 @@ s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) + } + EXPORT_SYMBOL(trusty_std_call32); + +-static int __trusty_share_memory(struct device *dev, u64 *id, +- struct scatterlist *sglist, unsigned int nents, +- pgprot_t pgprot, u64 tag, bool mem_share) ++int trusty_share_memory(struct device *dev, u64 *id, ++ struct scatterlist *sglist, unsigned int nents, ++ pgprot_t pgprot, u64 tag) + { + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); +- int ret; +- struct ns_mem_page_info pg_inf; +- struct scatterlist *sg; +- size_t count; + + if (WARN_ON(dev->driver != &trusty_driver.driver)) + return -EINVAL; + +- if (WARN_ON(nents < 1)) +- return -EINVAL; +- +- if (nents != 1 && s->api_version < TRUSTY_API_VERSION_MEM_OBJ) { +- dev_err(s->dev, "%s: old trusty version does not support non-contiguous memory objects\n", +- __func__); +- return -EOPNOTSUPP; +- } +- +- if (mem_share == false && s->api_version < TRUSTY_API_VERSION_MEM_OBJ) { +- dev_err(s->dev, "%s: old trusty version does not support lending memory objects\n", +- __func__); +- return 
-EOPNOTSUPP; +- } +- +- count = dma_map_sg(dev, sglist, nents, DMA_BIDIRECTIONAL); +- if (count != nents) { +- dev_err(s->dev, "failed to dma map sg_table\n"); +- return -EINVAL; +- } +- +- sg = sglist; +- ret = trusty_encode_page_info(&pg_inf, phys_to_page(sg_dma_address(sg)), +- pgprot); +- if (ret) { +- dev_err(s->dev, "%s: trusty_encode_page_info failed\n", +- __func__); +- return ret; +- } +- +- *id = pg_inf.compat_attr; +- return 0; +-} +- +-int trusty_share_memory(struct device *dev, u64 *id, +- struct scatterlist *sglist, unsigned int nents, +- pgprot_t pgprot, u64 tag) +-{ +- return __trusty_share_memory(dev, id, sglist, nents, pgprot, tag, true); ++ return s->mem_ops->trusty_share_memory(dev, id, sglist, nents, pgprot, ++ tag); + } + EXPORT_SYMBOL(trusty_share_memory); + +@@ -281,7 +216,13 @@ int trusty_lend_memory(struct device *dev, u64 *id, + struct scatterlist *sglist, unsigned int nents, + pgprot_t pgprot, u64 tag) + { +- return __trusty_share_memory(dev, id, sglist, nents, pgprot, tag, false); ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ if (WARN_ON(dev->driver != &trusty_driver.driver)) ++ return -EINVAL; ++ ++ return s->mem_ops->trusty_lend_memory(dev, id, sglist, nents, pgprot, ++ tag); + } + EXPORT_SYMBOL(trusty_lend_memory); + +@@ -316,22 +257,7 @@ int trusty_reclaim_memory(struct device *dev, u64 id, + if (WARN_ON(dev->driver != &trusty_driver.driver)) + return -EINVAL; + +- if (WARN_ON(nents < 1)) +- return -EINVAL; +- +- if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ) { +- if (nents != 1) { +- dev_err(s->dev, "%s: not supported\n", __func__); +- return -EOPNOTSUPP; +- } +- +- dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL); +- +- dev_dbg(s->dev, "%s: done\n", __func__); +- return 0; +- } +- +- return 0; ++ return s->mem_ops->trusty_reclaim_memory(dev, id, sglist, nents); + } + EXPORT_SYMBOL(trusty_reclaim_memory); + +@@ -382,7 +308,7 @@ const char *trusty_version_str_get(struct device *dev) + } + 
EXPORT_SYMBOL(trusty_version_str_get); + +-static void trusty_init_version(struct trusty_state *s, struct device *dev) ++static void trusty_init_version_str(struct trusty_state *s, struct device *dev) + { + int ret; + int i; +@@ -430,12 +356,17 @@ bool trusty_get_panic_status(struct device *dev) + } + EXPORT_SYMBOL(trusty_get_panic_status); + +-static int trusty_init_api_version(struct trusty_state *s, struct device *dev) ++int trusty_init_api_version(struct trusty_state *s, struct device *dev, ++ u32 (*send_direct_msg)(struct device *dev, ++ unsigned long fid, ++ unsigned long a0, ++ unsigned long a1, ++ unsigned long a2)) + { + u32 api_version; + +- api_version = trusty_fast_call32(dev, SMC_FC_API_VERSION, +- TRUSTY_API_VERSION_CURRENT, 0, 0); ++ api_version = send_direct_msg(dev, SMC_FC_API_VERSION, ++ TRUSTY_API_VERSION_CURRENT, 0, 0); + if (api_version == SM_ERR_UNDEFINED_SMC) + api_version = 0; + +@@ -598,11 +529,12 @@ static int trusty_probe(struct platform_device *pdev) + + platform_set_drvdata(pdev, s); + +- trusty_init_version(s, &pdev->dev); ++ /* Initialize SMC transport */ ++ ret = trusty_smc_transport_setup(s->dev); ++ if (ret != 0 || s->msg_ops == NULL || s->mem_ops == NULL) ++ goto err_transport_setup; + +- ret = trusty_init_api_version(s, &pdev->dev); +- if (ret < 0) +- goto err_api_version; ++ trusty_init_version_str(s, &pdev->dev); + + s->nop_wq = alloc_workqueue("trusty-nop-wq", WQ_CPU_INTENSIVE, 0); + if (!s->nop_wq) { +@@ -648,9 +580,10 @@ static int trusty_probe(struct platform_device *pdev) + err_alloc_works: + destroy_workqueue(s->nop_wq); + err_create_nop_wq: +-err_api_version: +- s->dev->dma_parms = NULL; + kfree(s->version_str); ++ trusty_smc_transport_cleanup(s->dev); ++err_transport_setup: ++ s->dev->dma_parms = NULL; + device_for_each_child(&pdev->dev, NULL, trusty_remove_child); + mutex_destroy(&s->smc_lock); + kfree(s); +@@ -673,6 +606,7 @@ static int trusty_remove(struct platform_device *pdev) + free_percpu(s->nop_works); + 
destroy_workqueue(s->nop_wq); + ++ trusty_smc_transport_cleanup(s->dev); + mutex_destroy(&s->smc_lock); + s->dev->dma_parms = NULL; + kfree(s->version_str); +-- +2.30.2 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0039-ANDROID-trusty-Modify-device-compatible-string.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0039-ANDROID-trusty-Modify-device-compatible-string.patch new file mode 100644 index 0000000000..8fd1c7ced2 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0039-ANDROID-trusty-Modify-device-compatible-string.patch @@ -0,0 +1,56 @@ +From 5566c2a41443e26068fe3a8e4a8e4b0c3a4e8ed6 Mon Sep 17 00:00:00 2001 +From: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Date: Fri, 14 Jan 2022 14:22:42 +0000 +Subject: [PATCH 26/32] ANDROID: trusty: Modify device compatible string + +Drop smc keyword from device tree node as Trusty can use SMC or FFA +based transport. + +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Change-Id: Id99b52f32a2122434a22f1991c0b4cd52b0676ed +Upstream-Status: Pending [Not submitted to upstream yet] +--- + Documentation/devicetree/bindings/trusty/trusty-irq.txt | 2 +- + Documentation/devicetree/bindings/trusty/trusty-smc.txt | 2 +- + drivers/trusty/trusty.c | 2 +- + 3 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/Documentation/devicetree/bindings/trusty/trusty-irq.txt b/Documentation/devicetree/bindings/trusty/trusty-irq.txt +index cbb545ad452b..ae02030be4e7 100644 +--- a/Documentation/devicetree/bindings/trusty/trusty-irq.txt ++++ b/Documentation/devicetree/bindings/trusty/trusty-irq.txt +@@ -48,7 +48,7 @@ Example: + }; + ... 
+ trusty { +- compatible = "android,trusty-smc-v1"; ++ compatible = "android,trusty-v1"; + ranges; + #address-cells = <2>; + #size-cells = <2>; +diff --git a/Documentation/devicetree/bindings/trusty/trusty-smc.txt b/Documentation/devicetree/bindings/trusty/trusty-smc.txt +index 1b39ad317c67..8d02a31ba814 100644 +--- a/Documentation/devicetree/bindings/trusty/trusty-smc.txt ++++ b/Documentation/devicetree/bindings/trusty/trusty-smc.txt +@@ -3,4 +3,4 @@ Trusty smc interface + Trusty is running in secure mode on the same (arm) cpu(s) as the current os. + + Required properties: +-- compatible: "android,trusty-smc-v1" ++- compatible: "android,trusty-v1" +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 0486827a45ca..757dd7b2c527 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -615,7 +615,7 @@ static int trusty_remove(struct platform_device *pdev) + } + + static const struct of_device_id trusty_of_match[] = { +- { .compatible = "android,trusty-smc-v1", }, ++ { .compatible = "android,trusty-v1", }, + {}, + }; + +-- +2.30.2 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0040-ANDROID-trusty-Add-transport-descriptor.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0040-ANDROID-trusty-Add-transport-descriptor.patch new file mode 100644 index 0000000000..53c76be5ca --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0040-ANDROID-trusty-Add-transport-descriptor.patch @@ -0,0 +1,209 @@ +From 27248b5c8cb5c1a59b08e46eb3ab918867282c1c Mon Sep 17 00:00:00 2001 +From: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Date: Fri, 14 Jan 2022 17:52:33 +0000 +Subject: [PATCH 27/32] ANDROID: trusty: Add transport descriptor + +Use transport descriptor to hold transport specific operations. This +helps to add new transport to Trusty core. 
+ +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Change-Id: Ibbde50de0302f6d259a7d572f0910067ce712b37 +Upstream-Status: Pending [Not submitted to upstream yet] +--- + drivers/trusty/trusty-private.h | 20 +++++++++- + drivers/trusty/trusty-smc.c | 6 +++ + drivers/trusty/trusty.c | 71 ++++++++++++++++++++++++++++++--- + 3 files changed, 90 insertions(+), 7 deletions(-) + +diff --git a/drivers/trusty/trusty-private.h b/drivers/trusty/trusty-private.h +index 4d73c6ae35d4..74b88bb8f83b 100644 +--- a/drivers/trusty/trusty-private.h ++++ b/drivers/trusty/trusty-private.h +@@ -14,12 +14,14 @@ struct trusty_work { + }; + + struct trusty_msg_ops { ++ const struct trusty_transport_desc *desc; + u32 (*send_direct_msg)(struct device *dev, unsigned long fid, + unsigned long a0, unsigned long a1, + unsigned long a2); + }; + + struct trusty_mem_ops { ++ const struct trusty_transport_desc *desc; + int (*trusty_share_memory)(struct device *dev, u64 *id, + struct scatterlist *sglist, + unsigned int nents, pgprot_t pgprot, u64 tag); +@@ -46,6 +48,19 @@ struct trusty_state { + struct device_dma_parameters dma_parms; + const struct trusty_msg_ops *msg_ops; + const struct trusty_mem_ops *mem_ops; ++ struct trusty_ffa_state *ffa; ++}; ++ ++struct trusty_ffa_state { ++ struct device *dev; /* ffa device */ ++ const struct ffa_dev_ops *ops; ++ struct mutex share_memory_msg_lock; /* protects share_memory_msg */ ++}; ++ ++struct trusty_transport_desc { ++ const char *name; ++ int (*setup)(struct device *dev); ++ void (*cleanup)(struct device *dev); + }; + + int trusty_init_api_version(struct trusty_state *s, struct device *dev, +@@ -55,7 +70,8 @@ int trusty_init_api_version(struct trusty_state *s, struct device *dev, + unsigned long a1, + unsigned long a2)); + +-int trusty_smc_transport_setup(struct device *dev); +-void trusty_smc_transport_cleanup(struct device *dev); ++typedef const struct trusty_transport_desc *trusty_transports_t; ++ ++extern const struct 
trusty_transport_desc trusty_smc_transport; + + #endif /* _TRUSTY_PRIVATE_H */ +diff --git a/drivers/trusty/trusty-smc.c b/drivers/trusty/trusty-smc.c +index 8fa841e0e253..62d1d7060744 100644 +--- a/drivers/trusty/trusty-smc.c ++++ b/drivers/trusty/trusty-smc.c +@@ -134,3 +134,9 @@ void trusty_smc_transport_cleanup(struct device *dev) + if (s->mem_ops == &trusty_smc_mem_ops) + s->mem_ops = NULL; + } ++ ++const struct trusty_transport_desc trusty_smc_transport = { ++ .name = "smc", ++ .setup = trusty_smc_transport_setup, ++ .cleanup = trusty_smc_transport_cleanup, ++}; +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 757dd7b2c527..ec0fccfaa24c 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -493,6 +493,46 @@ void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop) + } + EXPORT_SYMBOL(trusty_dequeue_nop); + ++static int ++trusty_transports_setup(const trusty_transports_t *transports, ++ struct device *dev) ++{ ++ const struct trusty_transport_desc *transport; ++ int ret; ++ int transports_ret = -ENODEV; ++ ++ if (!transports) ++ return -EINVAL; ++ ++ for (; (transport = *transports); transports++) { ++ if (!transport->setup) ++ return -EINVAL; ++ ++ ret = transport->setup(dev); ++ transports_ret &= ret; ++ } ++ ++ /* One transport needs to complete setup without error. 
*/ ++ if (transports_ret < 0) ++ return -ENODEV; ++ ++ return 0; ++} ++ ++static void ++trusty_transports_cleanup(const trusty_transports_t *transports, ++ struct device *dev) ++{ ++ const struct trusty_transport_desc *transport; ++ ++ for (; (transport = *transports); transports++) { ++ if (!transport->cleanup) ++ continue; ++ ++ transport->cleanup(dev); ++ } ++} ++ + static int trusty_probe(struct platform_device *pdev) + { + int ret; +@@ -500,6 +540,7 @@ static int trusty_probe(struct platform_device *pdev) + work_func_t work_func; + struct trusty_state *s; + struct device_node *node = pdev->dev.of_node; ++ const trusty_transports_t *descs; + + if (!node) { + dev_err(&pdev->dev, "of_node required\n"); +@@ -529,8 +570,12 @@ static int trusty_probe(struct platform_device *pdev) + + platform_set_drvdata(pdev, s); + +- /* Initialize SMC transport */ +- ret = trusty_smc_transport_setup(s->dev); ++ /* ++ * Initialize Trusty transport. Trusty msg and mem ops has to be ++ * initialized as part of transport setup. 
++ */ ++ descs = of_device_get_match_data(&pdev->dev); ++ ret = trusty_transports_setup(descs, s->dev); + if (ret != 0 || s->msg_ops == NULL || s->mem_ops == NULL) + goto err_transport_setup; + +@@ -581,7 +626,7 @@ static int trusty_probe(struct platform_device *pdev) + destroy_workqueue(s->nop_wq); + err_create_nop_wq: + kfree(s->version_str); +- trusty_smc_transport_cleanup(s->dev); ++ trusty_transports_cleanup(descs, s->dev); + err_transport_setup: + s->dev->dma_parms = NULL; + device_for_each_child(&pdev->dev, NULL, trusty_remove_child); +@@ -595,6 +640,7 @@ static int trusty_remove(struct platform_device *pdev) + { + unsigned int cpu; + struct trusty_state *s = platform_get_drvdata(pdev); ++ const trusty_transports_t *descs; + + device_for_each_child(&pdev->dev, NULL, trusty_remove_child); + +@@ -606,7 +652,10 @@ static int trusty_remove(struct platform_device *pdev) + free_percpu(s->nop_works); + destroy_workqueue(s->nop_wq); + +- trusty_smc_transport_cleanup(s->dev); ++ /* call transport cleanup */ ++ descs = of_device_get_match_data(&pdev->dev); ++ trusty_transports_cleanup(descs, s->dev); ++ + mutex_destroy(&s->smc_lock); + s->dev->dma_parms = NULL; + kfree(s->version_str); +@@ -614,8 +663,20 @@ static int trusty_remove(struct platform_device *pdev) + return 0; + } + ++/* ++ * Trusty probe will try all compiled in transports and will use the transport ++ * supported by the Trusty kernel. ++ * ++ * For Trusty API version < TRUSTY_API_VERSION_MEM_OBJ: ++ * trusty_smc_transport used for messaging. 
++ */ ++static const trusty_transports_t trusty_transports[] = { ++ &trusty_smc_transport, ++ NULL, ++}; ++ + static const struct of_device_id trusty_of_match[] = { +- { .compatible = "android,trusty-v1", }, ++ { .compatible = "android,trusty-v1", .data = trusty_transports }, + {}, + }; + +-- +2.30.2 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0041-ANDROID-trusty-Add-trusty-ffa-driver.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0041-ANDROID-trusty-Add-trusty-ffa-driver.patch new file mode 100644 index 0000000000..a61d2c882e --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0041-ANDROID-trusty-Add-trusty-ffa-driver.patch @@ -0,0 +1,312 @@ +From 3104eb14f62df1c7c4b9038eb914514b0ff371dc Mon Sep 17 00:00:00 2001 +From: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Date: Fri, 14 Jan 2022 18:47:08 +0000 +Subject: [PATCH 28/32] ANDROID: trusty: Add trusty-ffa driver + +Initial changes related to FFA transport support + - Adds FFA transport descriptor + - Defines Trusty UUID + - Initializes FFA transport does probe, sets ffa_ops + - Defers Trusty probe if ARM FF-A driver is not initialized or + Trusty SP not found. + - Link FF-A device as the supplier for Trusty platform device. 
+ +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Change-Id: I78f72b85c20e4bad4c24cf0826e96f27dcf2ee1d +Upstream-Status: Pending [Not submitted to upstream yet] +--- + drivers/trusty/Makefile | 1 + + drivers/trusty/trusty-ffa.c | 196 ++++++++++++++++++++++++++++++++ + drivers/trusty/trusty-ffa.h | 28 +++++ + drivers/trusty/trusty-private.h | 1 + + drivers/trusty/trusty.c | 6 + + 5 files changed, 232 insertions(+) + create mode 100644 drivers/trusty/trusty-ffa.c + create mode 100644 drivers/trusty/trusty-ffa.h + +diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile +index fbb53ee93003..797d61bf68ef 100644 +--- a/drivers/trusty/Makefile ++++ b/drivers/trusty/Makefile +@@ -6,6 +6,7 @@ + obj-$(CONFIG_TRUSTY) += trusty-core.o + trusty-core-objs += trusty.o trusty-mem.o + trusty-core-objs += trusty-smc.o ++trusty-core-objs += trusty-ffa.o + trusty-core-$(CONFIG_ARM) += trusty-smc-arm.o + trusty-core-$(CONFIG_ARM64) += trusty-smc-arm64.o + obj-$(CONFIG_TRUSTY_IRQ) += trusty-irq.o +diff --git a/drivers/trusty/trusty-ffa.c b/drivers/trusty/trusty-ffa.c +new file mode 100644 +index 000000000000..c8c16a1fc700 +--- /dev/null ++++ b/drivers/trusty/trusty-ffa.c +@@ -0,0 +1,196 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright (C) 2022 ARM Ltd. 
++ */ ++ ++#include <linux/platform_device.h> ++#include <linux/slab.h> ++#include <linux/trusty/smcall.h> ++#include <linux/arm_ffa.h> ++#include <linux/trusty/trusty.h> ++ ++#include <linux/scatterlist.h> ++#include <linux/dma-mapping.h> ++ ++#include "trusty-ffa.h" ++#include "trusty-private.h" ++ ++static const struct trusty_mem_ops trusty_ffa_mem_ops = { ++ .desc = &trusty_ffa_transport, ++}; ++ ++static const struct ffa_device_id trusty_ffa_device_id[] = { ++ /* ++ * Trusty UID: RFC-4122 compliant UUID version 4 ++ * 40ee25f0-a2bc-304c-8c4ca173c57d8af1 ++ */ ++ { UUID_INIT(0x40ee25f0, 0xa2bc, 0x304c, ++ 0x8c, 0x4c, 0xa1, 0x73, 0xc5, 0x7d, 0x8a, 0xf1) }, ++ {} ++}; ++ ++static int trusty_ffa_dev_match(struct device *dev, const void *uuid) ++{ ++ struct ffa_device *ffa_dev; ++ ++ ffa_dev = to_ffa_dev(dev); ++ if (uuid_equal(&ffa_dev->uuid, uuid)) ++ return 1; ++ ++ return 0; ++} ++ ++static struct ffa_device *trusty_ffa_dev_find(void) ++{ ++ const void *data; ++ struct device *dev; ++ ++ /* currently only one trusty instance is probed */ ++ data = &trusty_ffa_device_id[0].uuid; ++ ++ dev = bus_find_device(&ffa_bus_type, NULL, data, trusty_ffa_dev_match); ++ if (dev) { ++ /* drop reference count */ ++ put_device(dev); ++ return to_ffa_dev(dev); ++ } ++ ++ return NULL; ++} ++ ++static int trusty_ffa_link_supplier(struct device *c_dev, struct device *s_dev) ++{ ++ if (!c_dev || !s_dev) ++ return -EINVAL; ++ ++ if (!device_link_add(c_dev, s_dev, DL_FLAG_AUTOREMOVE_CONSUMER)) { ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ ++/* ++ * called from trusty probe ++ */ ++static int trusty_ffa_transport_setup(struct device *dev) ++{ ++ int rc; ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ struct trusty_ffa_state *ffa_state; ++ struct ffa_device *ffa_dev; ++ ++ /* ffa transport not required for lower api versions */ ++ if (s->api_version != 0 && s->api_version < TRUSTY_API_VERSION_MEM_OBJ) { ++ return -EINVAL; ++ } ++ ++ ffa_dev = 
trusty_ffa_dev_find(); ++ if (!ffa_dev) { ++ dev_dbg(dev, "FFA: Trusty device not found defer probe\n"); ++ return -EPROBE_DEFER; ++ } ++ ++ ffa_state = ffa_dev_get_drvdata(ffa_dev); ++ if (!ffa_state) ++ return -EINVAL; ++ ++ rc = trusty_ffa_link_supplier(dev, &ffa_dev->dev); ++ if (rc != 0) ++ return rc; ++ ++ /* FFA used only for memory sharing operations */ ++ if (s->api_version == TRUSTY_API_VERSION_MEM_OBJ) { ++ s->ffa = ffa_state; ++ s->mem_ops = &trusty_ffa_mem_ops; ++ return 0; ++ } ++ ++ return -EINVAL; ++} ++ ++static void trusty_ffa_transport_cleanup(struct device *dev) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ ++ /* ffa transport not setup for lower api versions */ ++ if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ) { ++ return; ++ } ++ ++ s->ffa = NULL; ++ s->mem_ops = NULL; ++} ++ ++static int trusty_ffa_probe(struct ffa_device *ffa_dev) ++{ ++ const struct ffa_dev_ops *ffa_ops; ++ struct trusty_ffa_state *s; ++ u32 ffa_drv_version; ++ ++ ffa_ops = ffa_dev_ops_get(ffa_dev); ++ if (!ffa_ops) { ++ dev_dbg(&ffa_dev->dev, "ffa_dev_ops_get: failed\n"); ++ return -ENOENT; ++ } ++ ++ /* check ffa driver version compatibility */ ++ ffa_drv_version = ffa_ops->api_version_get(); ++ if (TO_TRUSTY_FFA_MAJOR(ffa_drv_version) != TRUSTY_FFA_VERSION_MAJOR || ++ TO_TRUSTY_FFA_MINOR(ffa_drv_version) < TRUSTY_FFA_VERSION_MINOR) ++ return -EINVAL; ++ ++ s = kzalloc(sizeof(*s), GFP_KERNEL); ++ if (!s) ++ return -ENOMEM; ++ ++ s->dev = &ffa_dev->dev; ++ s->ops = ffa_ops; ++ mutex_init(&s->share_memory_msg_lock); ++ ffa_dev_set_drvdata(ffa_dev, s); ++ ++ ffa_ops->mode_32bit_set(ffa_dev); ++ ++ return 0; ++} ++ ++static void trusty_ffa_remove(struct ffa_device *ffa_dev) ++{ ++ struct trusty_ffa_state *s; ++ ++ s = ffa_dev_get_drvdata(ffa_dev); ++ ++ mutex_destroy(&s->share_memory_msg_lock); ++ memset(s, 0, sizeof(struct trusty_ffa_state)); ++ kfree(s); ++} ++ ++static struct ffa_driver trusty_ffa_driver = { ++ .name = "trusty-ffa", ++ 
.probe = trusty_ffa_probe, ++ .remove = trusty_ffa_remove, ++ .id_table = trusty_ffa_device_id, ++}; ++ ++static int __init trusty_ffa_transport_init(void) ++{ ++ if (IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)) { ++ return ffa_register(&trusty_ffa_driver); ++ } else ++ return -ENODEV; ++} ++ ++static void __exit trusty_ffa_transport_exit(void) ++{ ++ if (IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)) ++ ffa_unregister(&trusty_ffa_driver); ++} ++ ++const struct trusty_transport_desc trusty_ffa_transport = { ++ .name = "ffa", ++ .setup = trusty_ffa_transport_setup, ++ .cleanup = trusty_ffa_transport_cleanup, ++}; ++ ++module_init(trusty_ffa_transport_init); ++module_exit(trusty_ffa_transport_exit); +diff --git a/drivers/trusty/trusty-ffa.h b/drivers/trusty/trusty-ffa.h +new file mode 100644 +index 000000000000..267ca2c5db29 +--- /dev/null ++++ b/drivers/trusty/trusty-ffa.h +@@ -0,0 +1,28 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright (C) 2022 ARM Ltd. ++ */ ++ ++#ifndef __LINUX_TRUSTY_FFA_H ++#define __LINUX_TRUSTY_FFA_H ++ ++#include <linux/types.h> ++#include <linux/uuid.h> ++#include <linux/arm_ffa.h> ++ ++#define TRUSTY_FFA_VERSION_MAJOR (1U) ++#define TRUSTY_FFA_VERSION_MINOR (0U) ++#define TRUSTY_FFA_VERSION_MAJOR_SHIFT (16U) ++#define TRUSTY_FFA_VERSION_MAJOR_MASK (0x7fffU) ++#define TRUSTY_FFA_VERSION_MINOR_SHIFT (0U) ++#define TRUSTY_FFA_VERSION_MINOR_MASK (0U) ++ ++#define TO_TRUSTY_FFA_MAJOR(v) \ ++ ((u16)((v >> TRUSTY_FFA_VERSION_MAJOR_SHIFT) & \ ++ TRUSTY_FFA_VERSION_MAJOR_MASK)) ++ ++#define TO_TRUSTY_FFA_MINOR(v) \ ++ ((u16)((v >> TRUSTY_FFA_VERSION_MINOR_SHIFT) & \ ++ TRUSTY_FFA_VERSION_MINOR_MASK)) ++ ++#endif /* __LINUX_TRUSTY_FFA_H */ +diff --git a/drivers/trusty/trusty-private.h b/drivers/trusty/trusty-private.h +index 74b88bb8f83b..2496f397e5d2 100644 +--- a/drivers/trusty/trusty-private.h ++++ b/drivers/trusty/trusty-private.h +@@ -73,5 +73,6 @@ int trusty_init_api_version(struct trusty_state *s, struct device *dev, + typedef const 
struct trusty_transport_desc *trusty_transports_t; + + extern const struct trusty_transport_desc trusty_smc_transport; ++extern const struct trusty_transport_desc trusty_ffa_transport; + + #endif /* _TRUSTY_PRIVATE_H */ +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index ec0fccfaa24c..4686b0d34f61 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -509,6 +509,11 @@ trusty_transports_setup(const trusty_transports_t *transports, + return -EINVAL; + + ret = transport->setup(dev); ++ if (ret == -EPROBE_DEFER) { ++ dev_notice(dev, "transport %s: defer probe\n", ++ transport->name); ++ return ret; ++ } + transports_ret &= ret; + } + +@@ -672,6 +677,7 @@ static int trusty_remove(struct platform_device *pdev) + */ + static const trusty_transports_t trusty_transports[] = { + &trusty_smc_transport, ++ &trusty_ffa_transport, + NULL, + }; + +-- +2.30.2 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0042-ANDROID-trusty-ffa-Add-support-for-FFA-memory-operat.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0042-ANDROID-trusty-ffa-Add-support-for-FFA-memory-operat.patch new file mode 100644 index 0000000000..2b1500987d --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0042-ANDROID-trusty-ffa-Add-support-for-FFA-memory-operat.patch @@ -0,0 +1,151 @@ +From c552f8ed6bbd68e838732598ca74055bb696dcb3 Mon Sep 17 00:00:00 2001 +From: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Date: Tue, 18 Jan 2022 15:11:46 +0000 +Subject: [PATCH 29/32] ANDROID: trusty-ffa: Add support for FFA memory + operations + +Initializes Trusty mem_ops with FFA memory operations for share, +lend, reclaim. 
+ +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Change-Id: Id3a1eb5ae8e4721cb983c624773d39bafe25a77d +Upstream-Status: Pending [Not submitted to upstream yet] +--- + drivers/trusty/trusty-ffa.c | 102 ++++++++++++++++++++++++++++++++++++ + drivers/trusty/trusty.c | 5 ++ + 2 files changed, 107 insertions(+) + +diff --git a/drivers/trusty/trusty-ffa.c b/drivers/trusty/trusty-ffa.c +index c8c16a1fc700..0655b3887b52 100644 +--- a/drivers/trusty/trusty-ffa.c ++++ b/drivers/trusty/trusty-ffa.c +@@ -15,8 +15,110 @@ + #include "trusty-ffa.h" + #include "trusty-private.h" + ++static int __trusty_ffa_share_memory(struct device *dev, u64 *id, ++ struct scatterlist *sglist, ++ unsigned int nents, pgprot_t pgprot, ++ u64 tag, bool share) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ int ret; ++ struct scatterlist *sg; ++ size_t count; ++ struct ffa_device *ffa_dev = to_ffa_dev(s->ffa->dev); ++ const struct ffa_dev_ops *ffa_ops = s->ffa->ops; ++ struct ffa_mem_region_attributes ffa_mem_attr; ++ struct ffa_mem_ops_args ffa_mem_args; ++ ++ if (WARN_ON(nents < 1)) ++ return -EINVAL; ++ ++ count = dma_map_sg(dev, sglist, nents, DMA_BIDIRECTIONAL); ++ if (count != nents) { ++ dev_err(s->dev, "failed to dma map sg_table\n"); ++ return -EINVAL; ++ } ++ ++ sg = sglist; ++ ++ mutex_lock(&s->ffa->share_memory_msg_lock); ++ ++ ffa_mem_attr.receiver = ffa_dev->vm_id; ++ ffa_mem_attr.attrs = FFA_MEM_RW; ++ ++ ffa_mem_args.use_txbuf = 1; ++ ffa_mem_args.tag = tag; ++ ffa_mem_args.attrs = &ffa_mem_attr; ++ ffa_mem_args.nattrs = 1; ++ ffa_mem_args.sg = sg; ++ ffa_mem_args.flags = 0; ++ ++ if (share) { ++ ret = ffa_ops->memory_share(ffa_dev, &ffa_mem_args); ++ } else { ++ ret = ffa_ops->memory_lend(ffa_dev, &ffa_mem_args); ++ } ++ ++ mutex_unlock(&s->ffa->share_memory_msg_lock); ++ ++ if (!ret) { ++ *id = ffa_mem_args.g_handle; ++ dev_dbg(s->dev, "%s: done\n", __func__); ++ return 0; ++ } ++ ++ dev_err(s->dev, "%s: failed %d", __func__, 
ret); ++ ++ dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL); ++ return ret; ++} ++ ++static int trusty_ffa_share_memory(struct device *dev, u64 *id, ++ struct scatterlist *sglist, ++ unsigned int nents, pgprot_t pgprot, u64 tag) ++{ ++ return __trusty_ffa_share_memory(dev, id, sglist, nents, pgprot, tag, ++ true); ++} ++ ++static int trusty_ffa_lend_memory(struct device *dev, u64 *id, ++ struct scatterlist *sglist, ++ unsigned int nents, pgprot_t pgprot, u64 tag) ++{ ++ return __trusty_ffa_share_memory(dev, id, sglist, nents, pgprot, tag, ++ false); ++} ++ ++static int trusty_ffa_reclaim_memory(struct device *dev, u64 id, ++ struct scatterlist *sglist, ++ unsigned int nents) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ int ret = 0; ++ const struct ffa_dev_ops *ffa_ops = s->ffa->ops; ++ ++ if (WARN_ON(nents < 1)) ++ return -EINVAL; ++ ++ mutex_lock(&s->ffa->share_memory_msg_lock); ++ ++ ret = ffa_ops->memory_reclaim(id, 0); ++ ++ mutex_unlock(&s->ffa->share_memory_msg_lock); ++ ++ if (ret != 0) ++ return ret; ++ ++ dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL); ++ ++ dev_dbg(s->dev, "%s: done\n", __func__); ++ return 0; ++} ++ + static const struct trusty_mem_ops trusty_ffa_mem_ops = { + .desc = &trusty_ffa_transport, ++ .trusty_share_memory = &trusty_ffa_share_memory, ++ .trusty_lend_memory = &trusty_ffa_lend_memory, ++ .trusty_reclaim_memory = &trusty_ffa_reclaim_memory, + }; + + static const struct ffa_device_id trusty_ffa_device_id[] = { +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 4686b0d34f61..f91c255c9897 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -674,6 +674,11 @@ static int trusty_remove(struct platform_device *pdev) + * + * For Trusty API version < TRUSTY_API_VERSION_MEM_OBJ: + * trusty_smc_transport used for messaging. ++ * ++ * For Trusty API version == TRUSTY_API_VERSION_MEM_OBJ: ++ * trusty_smc_transport used for messaging. 
++ * trusty_ffa_transport used for memory sharing. ++ * + */ + static const trusty_transports_t trusty_transports[] = { + &trusty_smc_transport, +-- +2.30.2 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0043-ANDROID-trusty-ffa-Enable-FFA-transport-for-both-mem.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0043-ANDROID-trusty-ffa-Enable-FFA-transport-for-both-mem.patch new file mode 100644 index 0000000000..2c1623ac74 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0043-ANDROID-trusty-ffa-Enable-FFA-transport-for-both-mem.patch @@ -0,0 +1,142 @@ +From e67cd78142984c7c4120f15ef14e1e026746af5a Mon Sep 17 00:00:00 2001 +From: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Date: Thu, 3 Feb 2022 11:19:38 +0000 +Subject: [PATCH 30/32] ANDROID: trusty-ffa: Enable FFA transport for both + memory and message ops + +Adds new API version TRUSTY_API_VERSION_FFA and sets this as current +API version. + +If Trusty on the secure side supports receipt of FFA direct request, +then trusty core uses FFA calls for messages and memory operations. 
+ +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Change-Id: I4a8b060f906a96935a7df10713026fb543e2b9a7 +Upstream-Status: Pending [Not submitted to upstream yet] +--- + drivers/trusty/trusty-ffa.c | 58 +++++++++++++++++++++++++++++++++++ + drivers/trusty/trusty.c | 3 ++ + include/linux/trusty/smcall.h | 3 +- + 3 files changed, 63 insertions(+), 1 deletion(-) + +diff --git a/drivers/trusty/trusty-ffa.c b/drivers/trusty/trusty-ffa.c +index 0655b3887b52..543f5a0d31cb 100644 +--- a/drivers/trusty/trusty-ffa.c ++++ b/drivers/trusty/trusty-ffa.c +@@ -15,6 +15,36 @@ + #include "trusty-ffa.h" + #include "trusty-private.h" + ++/* partition property: Supports receipt of direct requests */ ++#define FFA_PARTITION_DIRECT_REQ_RECV BIT(0) ++ ++/* string representation of trusty UUID used for partition info get call */ ++static const char *trusty_uuid = "40ee25f0-a2bc-304c-8c4c-a173c57d8af1"; ++ ++static u32 trusty_ffa_send_direct_msg(struct device *dev, unsigned long fid, ++ unsigned long a0, unsigned long a1, ++ unsigned long a2) ++{ ++ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); ++ struct ffa_send_direct_data ffa_msg; ++ struct ffa_device *ffa_dev; ++ int ret; ++ ++ ffa_dev = to_ffa_dev(s->ffa->dev); ++ ++ ffa_msg.data0 = fid; ++ ffa_msg.data1 = a0; ++ ffa_msg.data2 = a1; ++ ffa_msg.data3 = a2; ++ ffa_msg.data4 = 0; ++ ++ ret = s->ffa->ops->sync_send_receive(ffa_dev, &ffa_msg); ++ if (!ret) ++ return ffa_msg.data0; ++ ++ return ret; ++} ++ + static int __trusty_ffa_share_memory(struct device *dev, u64 *id, + struct scatterlist *sglist, + unsigned int nents, pgprot_t pgprot, +@@ -114,6 +144,11 @@ static int trusty_ffa_reclaim_memory(struct device *dev, u64 id, + return 0; + } + ++static const struct trusty_msg_ops trusty_ffa_msg_ops = { ++ .desc = &trusty_ffa_transport, ++ .send_direct_msg = &trusty_ffa_send_direct_msg, ++}; ++ + static const struct trusty_mem_ops trusty_ffa_mem_ops = { + .desc = &trusty_ffa_transport, + 
.trusty_share_memory = &trusty_ffa_share_memory, +@@ -181,6 +216,7 @@ static int trusty_ffa_transport_setup(struct device *dev) + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + struct trusty_ffa_state *ffa_state; + struct ffa_device *ffa_dev; ++ struct ffa_partition_info pinfo = { 0 }; + + /* ffa transport not required for lower api versions */ + if (s->api_version != 0 && s->api_version < TRUSTY_API_VERSION_MEM_OBJ) { +@@ -208,6 +244,28 @@ static int trusty_ffa_transport_setup(struct device *dev) + return 0; + } + ++ /* check if Trusty partition can support receipt of direct requests. */ ++ rc = ffa_state->ops->partition_info_get(trusty_uuid, &pinfo); ++ if (rc || !(pinfo.properties & FFA_PARTITION_DIRECT_REQ_RECV)) { ++ dev_err(ffa_state->dev, "trusty_ffa_pinfo: ret: 0x%x, prop: 0x%x\n", ++ rc, pinfo.properties); ++ return -EINVAL; ++ } ++ ++ /* query and check Trusty API version */ ++ s->ffa = ffa_state; ++ rc = trusty_init_api_version(s, dev, trusty_ffa_send_direct_msg); ++ if (rc) { ++ s->ffa = NULL; ++ return -EINVAL; ++ } ++ ++ if (s->api_version == TRUSTY_API_VERSION_FFA) { ++ s->msg_ops = &trusty_ffa_msg_ops; ++ s->mem_ops = &trusty_ffa_mem_ops; ++ return 0; ++ } ++ + return -EINVAL; + } + +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index f91c255c9897..66273873f169 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -679,6 +679,9 @@ static int trusty_remove(struct platform_device *pdev) + * trusty_smc_transport used for messaging. + * trusty_ffa_transport used for memory sharing. + * ++ * For Trusty API version > TRUSTY_API_VERSION_MEM_OBJ: ++ * trusty_ffa_transport used for messaging and memory sharing operations. 
++ * + */ + static const trusty_transports_t trusty_transports[] = { + &trusty_smc_transport, +diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h +index aea3f6068593..17b3d1c2c4f6 100644 +--- a/include/linux/trusty/smcall.h ++++ b/include/linux/trusty/smcall.h +@@ -109,7 +109,8 @@ + #define TRUSTY_API_VERSION_SMP_NOP (3) + #define TRUSTY_API_VERSION_PHYS_MEM_OBJ (4) + #define TRUSTY_API_VERSION_MEM_OBJ (5) +-#define TRUSTY_API_VERSION_CURRENT (5) ++#define TRUSTY_API_VERSION_FFA (6) ++#define TRUSTY_API_VERSION_CURRENT (6) + #define SMC_FC_API_VERSION SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 11) + + /* TRUSTED_OS entity calls */ +-- +2.30.2 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0044-ANDROID-trusty-Make-trusty-transports-configurable.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0044-ANDROID-trusty-Make-trusty-transports-configurable.patch new file mode 100644 index 0000000000..3076eca749 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/0044-ANDROID-trusty-Make-trusty-transports-configurable.patch @@ -0,0 +1,146 @@ +From 088162ab1852aa0f2034199e97a327b6240231db Mon Sep 17 00:00:00 2001 +From: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Date: Wed, 16 Mar 2022 11:14:09 +0000 +Subject: [PATCH 31/32] ANDROID: trusty: Make trusty transports configurable + +With TRUSTY_SMC_TRANSPORT set to 'y', SMC based message passing and +memory sharing support will be compiled in to trusty core. + +With TRUSTY_FFA_TRANSPORT set to 'y', FFA based message passing and +memory sharing support will be compiled in to trusty core. This +depends on ARM FF-A driver (ARM_FFA_TRANSPORT). + +Enabling any of the transport sets config TRUSTY_HAVE_TRANSPORT to 'y'. +Not enabling any of the transport causes the build to break. 
+ +Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com> +Change-Id: Ib5bbf0d39202e6897700264d14371ae33101c1d1 +Upstream-Status: Pending [Not submitted to upstream yet] +--- + drivers/trusty/Kconfig | 30 ++++++++++++++++++++++++++++++ + drivers/trusty/Makefile | 26 +++++++++++++++----------- + drivers/trusty/trusty-private.h | 4 ++++ + drivers/trusty/trusty.c | 7 +++++++ + 4 files changed, 56 insertions(+), 11 deletions(-) + +diff --git a/drivers/trusty/Kconfig b/drivers/trusty/Kconfig +index fcde7f097acf..260022e4595b 100644 +--- a/drivers/trusty/Kconfig ++++ b/drivers/trusty/Kconfig +@@ -21,6 +21,36 @@ config TRUSTY + + if TRUSTY + ++config TRUSTY_HAVE_TRANSPORT ++ bool ++ help ++ If any of the Trusty transport is enabled then it sets this config ++ option. This variable is used to break the build when none of the ++ Trusty transports are enabled. ++ ++config TRUSTY_SMC_TRANSPORT ++ bool "Trusty transport based on SMC" ++ select TRUSTY_HAVE_TRANSPORT ++ default n ++ help ++ Enable SMC based transport for Trusty. This transport is required for ++ Trusty API version <= TRUSTY_API_VERSION_MEM_OBJ. ++ ++ If you want to use legacy SMC based transport for sending Trusty ++ messages to secure world, answer Y. ++ ++config TRUSTY_FFA_TRANSPORT ++ bool "Trusty transport based on FFA" ++ select TRUSTY_HAVE_TRANSPORT ++ depends on ARM_FFA_TRANSPORT ++ default y ++ help ++ Enable ARM FF-A based transport for Trusty. This transport is required ++ for Trusty API version >= TRUSTY_API_VERSION_MEM_OBJ. ++ ++ If you want to use ARM FF-A based transport for sending Trusty messages ++ to secure world, answer Y. 
++ + config TRUSTY_IRQ + tristate "Trusty IRQ support" + default y +diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile +index 797d61bf68ef..104a4d0ed35c 100644 +--- a/drivers/trusty/Makefile ++++ b/drivers/trusty/Makefile +@@ -3,14 +3,18 @@ + # Makefile for trusty components + # + +-obj-$(CONFIG_TRUSTY) += trusty-core.o +-trusty-core-objs += trusty.o trusty-mem.o +-trusty-core-objs += trusty-smc.o +-trusty-core-objs += trusty-ffa.o +-trusty-core-$(CONFIG_ARM) += trusty-smc-arm.o +-trusty-core-$(CONFIG_ARM64) += trusty-smc-arm64.o +-obj-$(CONFIG_TRUSTY_IRQ) += trusty-irq.o +-obj-$(CONFIG_TRUSTY_LOG) += trusty-log.o +-obj-$(CONFIG_TRUSTY_TEST) += trusty-test.o +-obj-$(CONFIG_TRUSTY_VIRTIO) += trusty-virtio.o +-obj-$(CONFIG_TRUSTY_VIRTIO_IPC) += trusty-ipc.o ++obj-$(CONFIG_TRUSTY) += trusty-core.o ++trusty-core-objs += trusty.o ++trusty-arm-smc-$(CONFIG_ARM) += trusty-smc-arm.o ++trusty-arm-smc64-$(CONFIG_ARM64) += trusty-smc-arm64.o ++trusty-transport-$(CONFIG_TRUSTY_SMC_TRANSPORT) += trusty-smc.o ++trusty-transport-$(CONFIG_TRUSTY_SMC_TRANSPORT) += trusty-mem.o ++trusty-transport-$(CONFIG_TRUSTY_SMC_TRANSPORT) += $(trusty-arm-smc-y) ++trusty-transport-$(CONFIG_TRUSTY_SMC_TRANSPORT) += $(trusty-arm-smc64-y) ++trusty-transport-$(CONFIG_TRUSTY_FFA_TRANSPORT) += trusty-ffa.o ++trusty-core-objs += $(trusty-transport-y) ++obj-$(CONFIG_TRUSTY_IRQ) += trusty-irq.o ++obj-$(CONFIG_TRUSTY_LOG) += trusty-log.o ++obj-$(CONFIG_TRUSTY_TEST) += trusty-test.o ++obj-$(CONFIG_TRUSTY_VIRTIO) += trusty-virtio.o ++obj-$(CONFIG_TRUSTY_VIRTIO_IPC) += trusty-ipc.o +diff --git a/drivers/trusty/trusty-private.h b/drivers/trusty/trusty-private.h +index 2496f397e5d2..386ca9ae5af3 100644 +--- a/drivers/trusty/trusty-private.h ++++ b/drivers/trusty/trusty-private.h +@@ -72,7 +72,11 @@ int trusty_init_api_version(struct trusty_state *s, struct device *dev, + + typedef const struct trusty_transport_desc *trusty_transports_t; + ++#ifdef CONFIG_TRUSTY_SMC_TRANSPORT + extern const struct 
trusty_transport_desc trusty_smc_transport; ++#endif ++#ifdef CONFIG_TRUSTY_FFA_TRANSPORT + extern const struct trusty_transport_desc trusty_ffa_transport; ++#endif + + #endif /* _TRUSTY_PRIVATE_H */ +diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c +index 66273873f169..06698f3c67f9 100644 +--- a/drivers/trusty/trusty.c ++++ b/drivers/trusty/trusty.c +@@ -684,8 +684,12 @@ static int trusty_remove(struct platform_device *pdev) + * + */ + static const trusty_transports_t trusty_transports[] = { ++#ifdef CONFIG_TRUSTY_SMC_TRANSPORT + &trusty_smc_transport, ++#endif ++#ifdef CONFIG_TRUSTY_FFA_TRANSPORT + &trusty_ffa_transport, ++#endif + NULL, + }; + +@@ -708,6 +712,9 @@ static struct platform_driver trusty_driver = { + + static int __init trusty_driver_init(void) + { ++ BUILD_BUG_ON_MSG(!IS_ENABLED(CONFIG_TRUSTY_HAVE_TRANSPORT), ++ "Trusty transport not configured"); ++ + return platform_driver_register(&trusty_driver); + } + +-- +2.30.2 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/gki_defconfig b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/gki_defconfig new file mode 100644 index 0000000000..30bd964dd2 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-5.10/tc/gki_defconfig @@ -0,0 +1,689 @@ +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_RCU_EXPERT=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_NOCB_CPU=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y +CONFIG_UCLAMP_TASK=y +CONFIG_UCLAMP_BUCKETS_COUNT=20 +CONFIG_CGROUPS=y +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_SCHED=y +CONFIG_UCLAMP_TASK_GROUP=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_BPF=y +CONFIG_NAMESPACES=y +# CONFIG_PID_NS is not set +CONFIG_RT_SOFTINT_OPTIMIZATION=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not 
set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_BOOT_CONFIG=y +# CONFIG_SYSFS_SYSCALL is not set +# CONFIG_FHANDLE is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_USERFAULTFD=y +# CONFIG_RSEQ is not set +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB_MERGE_DEFAULT is not set +CONFIG_SLAB_FREELIST_RANDOM=y +CONFIG_SLAB_FREELIST_HARDENED=y +CONFIG_SHUFFLE_PAGE_ALLOCATOR=y +CONFIG_PROFILING=y +# CONFIG_ZONE_DMA is not set +CONFIG_ARCH_SUNXI=y +CONFIG_ARCH_HISI=y +CONFIG_ARCH_QCOM=y +CONFIG_ARCH_SPRD=y +CONFIG_SCHED_MC=y +CONFIG_NR_CPUS=32 +CONFIG_PARAVIRT=y +CONFIG_ARM64_SW_TTBR0_PAN=y +CONFIG_COMPAT=y +CONFIG_ARMV8_DEPRECATED=y +CONFIG_SWP_EMULATION=y +CONFIG_CP15_BARRIER_EMULATION=y +CONFIG_SETEND_EMULATION=y +CONFIG_RANDOMIZE_BASE=y +# CONFIG_RANDOMIZE_MODULE_REGION_FULL is not set +CONFIG_CMDLINE="stack_depot_disable=on kasan.stacktrace=off kvm-arm.mode=protected cgroup_disable=pressure cgroup.memory=nokmem" +CONFIG_CMDLINE_EXTEND=y +# CONFIG_DMI is not set +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_ENERGY_MODEL=y +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_CPU_IDLE_GOV_TEO=y +CONFIG_ARM_CPUIDLE=y +CONFIG_ARM_PSCI_CPUIDLE=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_TIMES=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_ARM_SCPI_CPUFREQ=y +CONFIG_ARM_SCMI_CPUFREQ=y +CONFIG_ARM_SCMI_PROTOCOL=y +# CONFIG_ARM_SCMI_POWER_DOMAIN is not set +CONFIG_ARM_SCPI_PROTOCOL=y +# CONFIG_ARM_SCPI_POWER_DOMAIN is not set +# CONFIG_EFI_ARMSTUB_DTB_LOADER is not set +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_CRYPTO_SHA2_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_SHADOW_CALL_STACK=y +CONFIG_LTO_CLANG_FULL=y +CONFIG_CFI_CLANG=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SCMVERSION=y 
+CONFIG_BLK_CGROUP_IOCOST=y +CONFIG_BLK_INLINE_ENCRYPTION=y +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_GKI_HACKS_TO_FIX=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_MISC=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y +CONFIG_CLEANCACHE=y +CONFIG_CMA=y +CONFIG_CMA_DEBUGFS=y +CONFIG_CMA_SYSFS=y +CONFIG_CMA_AREAS=16 +CONFIG_READ_ONLY_THP_FOR_FS=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=y +CONFIG_XDP_SOCKETS=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_NET_IPIP=y +CONFIG_NET_IPGRE_DEMUX=y +CONFIG_NET_IPGRE=y +CONFIG_NET_IPVTI=y +CONFIG_INET_ESP=y +CONFIG_INET_UDP_DIAG=y +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_VTI=y +CONFIG_IPV6_GRE=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_DSCP=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y 
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_BPF=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_TIPC=y +CONFIG_L2TP=y +CONFIG_BRIDGE=y +CONFIG_6LOWPAN=y +CONFIG_IEEE802154=y +CONFIG_IEEE802154_6LOWPAN=y +CONFIG_MAC802154=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_PRIO=y +CONFIG_NET_SCH_MULTIQ=y +CONFIG_NET_SCH_SFQ=y +CONFIG_NET_SCH_TBF=y +CONFIG_NET_SCH_NETEM=y +CONFIG_NET_SCH_CODEL=y +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_FQ=y +CONFIG_NET_SCH_INGRESS=y 
+CONFIG_NET_CLS_BASIC=y +CONFIG_NET_CLS_TCINDEX=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_CLS_BPF=y +CONFIG_NET_CLS_MATCHALL=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +CONFIG_NET_ACT_MIRRED=y +CONFIG_NET_ACT_SKBEDIT=y +CONFIG_VSOCKETS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_JIT=y +CONFIG_CAN=y +CONFIG_BT=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_HIDP=y +CONFIG_BT_HCIBTSDIO=y +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_LL=y +CONFIG_BT_HCIUART_BCM=y +CONFIG_BT_HCIUART_QCA=y +CONFIG_RFKILL=y +CONFIG_NFC=y +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIEAER=y +CONFIG_PCI_IOV=y +CONFIG_PCI_HOST_GENERIC=y +CONFIG_PCIE_DW_PLAT_EP=y +CONFIG_PCIE_QCOM=y +CONFIG_PCIE_KIRIN=y +CONFIG_PCI_ENDPOINT=y +CONFIG_FW_LOADER_USER_HELPER=y +# CONFIG_FW_CACHE is not set +# CONFIG_SUN50I_DE2_BUS is not set +# CONFIG_SUNXI_RSB is not set +CONFIG_GNSS=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=16 +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_SRAM=y +CONFIG_UID_SYS_STATS=y +CONFIG_SCSI=y +# CONFIG_SCSI_PROC_FS is not set +CONFIG_BLK_DEV_SD=y +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PCI=y +CONFIG_SCSI_UFSHCD_PLATFORM=y +CONFIG_SCSI_UFS_DWC_TC_PLATFORM=y +CONFIG_SCSI_UFS_HISI=y +CONFIG_SCSI_UFS_BSG=y +CONFIG_SCSI_UFS_CRYPTO=y +CONFIG_SCSI_UFS_HPB=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y +CONFIG_DM_SNAPSHOT=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_BOW=y +CONFIG_NETDEVICES=y +CONFIG_DUMMY=y +CONFIG_WIREGUARD=y +CONFIG_IFB=y +CONFIG_TUN=y +CONFIG_VETH=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +CONFIG_PPTP=y +CONFIG_PPPOL2TP=y +CONFIG_USB_RTL8150=y +CONFIG_USB_RTL8152=y +CONFIG_USB_USBNET=y +# 
CONFIG_USB_NET_AX8817X is not set +# CONFIG_USB_NET_AX88179_178A is not set +CONFIG_USB_NET_CDC_EEM=y +# CONFIG_USB_NET_NET1080 is not set +# CONFIG_USB_NET_CDC_SUBSET is not set +# CONFIG_USB_NET_ZAURUS is not set +CONFIG_USB_NET_AQC111=y +# CONFIG_WLAN_VENDOR_ADMTEK is not set +# CONFIG_WLAN_VENDOR_ATH is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +# CONFIG_WLAN_VENDOR_BROADCOM is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +# CONFIG_WLAN_VENDOR_INTEL is not set +# CONFIG_WLAN_VENDOR_INTERSIL is not set +# CONFIG_WLAN_VENDOR_MARVELL is not set +# CONFIG_WLAN_VENDOR_MEDIATEK is not set +# CONFIG_WLAN_VENDOR_RALINK is not set +# CONFIG_WLAN_VENDOR_REALTEK is not set +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +# CONFIG_WLAN_VENDOR_QUANTENNA is not set +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_GPIO=y +# CONFIG_MOUSE_PS2 is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=y +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_SERIAL_8250_EXAR is not set +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_SERIAL_SAMSUNG=y +CONFIG_SERIAL_SAMSUNG_CONSOLE=y +CONFIG_SERIAL_MSM_GENI_EARLY_CONSOLE=y +CONFIG_SERIAL_SPRD=y +CONFIG_SERIAL_SPRD_CONSOLE=y +CONFIG_HVC_DCC=y +CONFIG_HVC_DCC_SERIALIZE_SMP=y +CONFIG_SERIAL_DEV_BUS=y +CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_CAVIUM is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVPORT is not set +# CONFIG_I2C_COMPAT is not set +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I3C=y +CONFIG_SPI=y +CONFIG_SPMI=y +# CONFIG_SPMI_MSM_PMIC_ARB is not set +# CONFIG_PINCTRL_SUN8I_H3_R is not set +# CONFIG_PINCTRL_SUN50I_A64 is not set +# 
CONFIG_PINCTRL_SUN50I_A64_R is not set +# CONFIG_PINCTRL_SUN50I_H5 is not set +# CONFIG_PINCTRL_SUN50I_H6 is not set +# CONFIG_PINCTRL_SUN50I_H6_R is not set +CONFIG_GPIO_GENERIC_PLATFORM=y +CONFIG_POWER_RESET_HISI=y +# CONFIG_HWMON is not set +CONFIG_THERMAL=y +CONFIG_THERMAL_NETLINK=y +CONFIG_THERMAL_STATISTICS=y +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=100 +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y +CONFIG_CPU_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_THERMAL_EMULATION=y +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +CONFIG_MFD_ACT8945A=y +CONFIG_MFD_SYSCON=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_RC_CORE=y +# CONFIG_RC_MAP is not set +CONFIG_LIRC=y +CONFIG_BPF_LIRC_MODE2=y +CONFIG_RC_DECODERS=y +CONFIG_RC_DEVICES=y +CONFIG_MEDIA_CEC_RC=y +# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set +# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set +# CONFIG_MEDIA_RADIO_SUPPORT is not set +# CONFIG_MEDIA_SDR_SUPPORT is not set +# CONFIG_MEDIA_TEST_SUPPORT is not set +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_VIDEO_CLASS=y +CONFIG_USB_GSPCA=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_V4L_MEM2MEM_DRIVERS=y +# CONFIG_VGA_ARB is not set +CONFIG_DRM=y +# CONFIG_DRM_FBDEV_EMULATION is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_HRTIMER=y +CONFIG_SND_DYNAMIC_MINORS=y +# CONFIG_SND_SUPPORT_OLD_API is not set +# CONFIG_SND_DRIVERS is not set +CONFIG_SND_USB_AUDIO=y +CONFIG_SND_SOC=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_APPLE=y +CONFIG_HID_BETOP_FF=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_ELECOM=y +CONFIG_HID_UCLOGIC=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NINTENDO=y +CONFIG_HID_PICOLCD=y +CONFIG_HID_PLANTRONICS=y +CONFIG_HID_PLAYSTATION=y +CONFIG_PLAYSTATION_FF=y +CONFIG_HID_ROCCAT=y +CONFIG_HID_SONY=y 
+CONFIG_SONY_FF=y +CONFIG_HID_STEAM=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_OTG=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_ACM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_UAS=y +CONFIG_USB_DWC3=y +CONFIG_USB_GADGET=y +CONFIG_USB_DUMMY_HCD=y +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_SERIAL=y +CONFIG_USB_CONFIGFS_ACM=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_ECM=y +CONFIG_USB_CONFIGFS_RNDIS=y +CONFIG_USB_CONFIGFS_EEM=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y +CONFIG_USB_CONFIGFS_F_UAC2=y +CONFIG_USB_CONFIGFS_F_MIDI=y +CONFIG_USB_CONFIGFS_F_HID=y +CONFIG_TYPEC=y +CONFIG_TYPEC_TCPM=y +CONFIG_TYPEC_TCPCI=y +CONFIG_TYPEC_UCSI=y +CONFIG_MMC=y +# CONFIG_PWRSEQ_EMMC is not set +# CONFIG_PWRSEQ_SIMPLE is not set +CONFIG_MMC_CRYPTO=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_LEDS_CLASS_FLASH=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_TRANSIENT=y +CONFIG_EDAC=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_PL030=y +CONFIG_RTC_DRV_PL031=y +CONFIG_DMABUF_HEAPS=y +CONFIG_DMABUF_SYSFS_STATS=y +CONFIG_DMABUF_HEAPS_DEFERRED_FREE=y +CONFIG_DMABUF_HEAPS_PAGE_POOL=y +CONFIG_UIO=y +CONFIG_VHOST_VSOCK=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_DEBUG_KINFO=y +CONFIG_COMMON_CLK_SCPI=y +# CONFIG_SPRD_COMMON_CLK is not set +# CONFIG_CLK_SUNXI is not set +# CONFIG_SUNXI_CCU is not set +CONFIG_HWSPINLOCK=y +CONFIG_SUN4I_TIMER=y +# CONFIG_SUN50I_ERRATUM_UNKNOWN1 is not set +CONFIG_MTK_TIMER=y +CONFIG_MAILBOX=y +CONFIG_IOMMU_LIMIT_IOVA_ALIGNMENT=y +CONFIG_IOMMU_IO_PGTABLE_ARMV7S=y +CONFIG_REMOTEPROC=y +CONFIG_REMOTEPROC_CDEV=y +CONFIG_RPMSG_CHAR=y +CONFIG_DEVFREQ_GOV_PERFORMANCE=y +CONFIG_DEVFREQ_GOV_POWERSAVE=y +CONFIG_DEVFREQ_GOV_USERSPACE=y +CONFIG_DEVFREQ_GOV_PASSIVE=y +CONFIG_PM_DEVFREQ_EVENT=y +CONFIG_IIO=y 
+CONFIG_IIO_BUFFER=y +CONFIG_IIO_TRIGGER=y +CONFIG_PWM=y +CONFIG_GENERIC_PHY=y +CONFIG_POWERCAP=y +CONFIG_DTPM=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_BINDERFS=y +CONFIG_ANDROID_DEBUG_SYMBOLS=y +CONFIG_ANDROID_VENDOR_HOOKS=y +CONFIG_LIBNVDIMM=y +# CONFIG_ND_BLK is not set +CONFIG_INTERCONNECT=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_F2FS_FS=y +CONFIG_F2FS_FS_SECURITY=y +CONFIG_F2FS_FS_COMPRESSION=y +CONFIG_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y +CONFIG_FS_VERITY=y +CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y +# CONFIG_DNOTIFY is not set +CONFIG_QUOTA=y +CONFIG_QFMT_V2=y +CONFIG_FUSE_FS=y +CONFIG_VIRTIO_FS=y +CONFIG_OVERLAY_FS=y +CONFIG_INCREMENTAL_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_EXFAT_FS=y +CONFIG_TMPFS=y +# CONFIG_EFIVAR_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_PMSG=y +CONFIG_PSTORE_RAM=y +CONFIG_EROFS_FS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=y +CONFIG_NLS_CODEPAGE_775=y +CONFIG_NLS_CODEPAGE_850=y +CONFIG_NLS_CODEPAGE_852=y +CONFIG_NLS_CODEPAGE_855=y +CONFIG_NLS_CODEPAGE_857=y +CONFIG_NLS_CODEPAGE_860=y +CONFIG_NLS_CODEPAGE_861=y +CONFIG_NLS_CODEPAGE_862=y +CONFIG_NLS_CODEPAGE_863=y +CONFIG_NLS_CODEPAGE_864=y +CONFIG_NLS_CODEPAGE_865=y +CONFIG_NLS_CODEPAGE_866=y +CONFIG_NLS_CODEPAGE_869=y +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=y +CONFIG_NLS_CODEPAGE_949=y +CONFIG_NLS_CODEPAGE_874=y +CONFIG_NLS_ISO8859_8=y +CONFIG_NLS_CODEPAGE_1250=y +CONFIG_NLS_CODEPAGE_1251=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_2=y +CONFIG_NLS_ISO8859_3=y +CONFIG_NLS_ISO8859_4=y +CONFIG_NLS_ISO8859_5=y +CONFIG_NLS_ISO8859_6=y +CONFIG_NLS_ISO8859_7=y +CONFIG_NLS_ISO8859_9=y +CONFIG_NLS_ISO8859_13=y +CONFIG_NLS_ISO8859_14=y +CONFIG_NLS_ISO8859_15=y +CONFIG_NLS_KOI8_R=y +CONFIG_NLS_KOI8_U=y +CONFIG_NLS_MAC_ROMAN=y +CONFIG_NLS_MAC_CELTIC=y +CONFIG_NLS_MAC_CENTEURO=y +CONFIG_NLS_MAC_CROATIAN=y 
+CONFIG_NLS_MAC_CYRILLIC=y +CONFIG_NLS_MAC_GAELIC=y +CONFIG_NLS_MAC_GREEK=y +CONFIG_NLS_MAC_ICELAND=y +CONFIG_NLS_MAC_INUIT=y +CONFIG_NLS_MAC_ROMANIAN=y +CONFIG_NLS_MAC_TURKISH=y +CONFIG_NLS_UTF8=y +CONFIG_UNICODE=y +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_HARDENED_USERCOPY=y +# CONFIG_HARDENED_USERCOPY_FALLBACK is not set +CONFIG_FORTIFY_SOURCE=y +CONFIG_STATIC_USERMODEHELPER=y +CONFIG_STATIC_USERMODEHELPER_PATH="" +CONFIG_SECURITY_SELINUX=y +CONFIG_INIT_STACK_ALL_ZERO=y +CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y +CONFIG_CRYPTO_CHACHA20POLY1305=y +CONFIG_CRYPTO_ADIANTUM=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_BLAKE2B=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_LZ4=y +CONFIG_CRYPTO_ZSTD=y +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRC8=y +CONFIG_XZ_DEC=y +CONFIG_DMA_CMA=y +CONFIG_STACK_HASH_ORDER=12 +CONFIG_PRINTK_TIME=y +CONFIG_PRINTK_CALLER=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF4=y +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_HEADERS_INSTALL=y +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_UBSAN=y +CONFIG_UBSAN_TRAP=y +CONFIG_UBSAN_LOCAL_BOUNDS=y +# CONFIG_UBSAN_MISC is not set +CONFIG_PAGE_OWNER=y +CONFIG_PAGE_PINNER=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_KASAN=y +CONFIG_KASAN_HW_TAGS=y +CONFIG_KFENCE=y +CONFIG_KFENCE_SAMPLE_INTERVAL=500 +CONFIG_KFENCE_NUM_OBJECTS=63 +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_TIMEOUT=-1 +CONFIG_DETECT_HUNG_TASK=y +CONFIG_WQ_WATCHDOG=y +CONFIG_SCHEDSTATS=y +# CONFIG_DEBUG_PREEMPT is not set +CONFIG_BUG_ON_DATA_CORRUPTION=y +CONFIG_TRACE_MMIO_ACCESS=y +CONFIG_TRACEFS_DISABLE_AUTOMOUNT=y +CONFIG_HIST_TRIGGERS=y +# CONFIG_RUNTIME_TESTING_MENU is not set diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-clang.inc b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-clang.inc new file mode 100644 index 0000000000..c5b746341f --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack-clang.inc @@ -0,0 +1,8 @@ 
+# Clang-specific configuration of kernel build + +# We need to add this dependency as the kernel configuration depends on the compiler +do_kernel_configme[depends] += "androidclang-native:do_populate_sysroot" + +DEPENDS:append = " androidclang-native" + +KERNEL_CC = "${CCACHE}clang ${HOST_CC_KERNEL_ARCH}" diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack_%.bbappend b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack_%.bbappend new file mode 100644 index 0000000000..301041bc13 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack_%.bbappend @@ -0,0 +1,3 @@ +LINUX_ARM64_ACK_TOOLCHAIN_REQUIRE = "${@oe.utils.ifelse(d.getVar('LINUX_ACK_TOOLCHAIN_CLANG'), 'linux-arm64-ack-clang.inc', '')}" + +require ${LINUX_ARM64_ACK_TOOLCHAIN_REQUIRE} diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack_5.10.bbappend b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack_5.10.bbappend new file mode 100644 index 0000000000..e2f9145c47 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-arm64-ack_5.10.bbappend @@ -0,0 +1,5 @@ +# Machine specific configurations + +FILESEXTRAPATHS:prepend := "${THISDIR}/${BP}:" + +require linux-arm-platforms.inc diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/0001-iommu-arm-smmu-v3-workaround-for-ATC_INV_SIZE_ALL-in.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/0001-iommu-arm-smmu-v3-workaround-for-ATC_INV_SIZE_ALL-in.patch new file mode 100644 index 0000000000..a75ca247dc --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/0001-iommu-arm-smmu-v3-workaround-for-ATC_INV_SIZE_ALL-in.patch @@ -0,0 +1,49 @@ +Upstream-Status: Inappropriate [Workaround] +Signed-off-by: Manoj Kumar <manoj.kumar3@arm.com> + +From 949ba3f12ec1f3177a82a9228dc402ab5d8c9d60 Mon Sep 17 00:00:00 2001 +From: Manoj Kumar <manoj.kumar3@arm.com> +Date: Mon, 1 Feb 2021 21:36:43 +0530 +Subject: [PATCH 
1/5] iommu/arm-smmu-v3: workaround for ATC_INV_SIZE_ALL in + N1SDP + +ATC_INV_SIZE_ALL request should automatically translate to ATS +address which is not happening in SMMUv3 version gone into +N1SDP platform. This workaround manually sets the ATS address +field to proper value for ATC_INV_SIZE_ALL command. + +Change-Id: If89465be94720a62be85e1e6612f17e93fa9b8a5 +Signed-off-by: Manoj Kumar <manoj.kumar3@arm.com> +Signed-off-by: Khasim Syed Mohammed <khasim.mohammed@arm.com> +--- + drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 1 + + drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 1 + + 2 files changed, 2 insertions(+) + +diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +index a388e318f86e..ceca576b0bf6 100644 +--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c ++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +@@ -1724,6 +1724,7 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size, + }; + + if (!size) { ++ cmd->atc.addr = ATC_INV_ADDR_ALL; + cmd->atc.size = ATC_INV_SIZE_ALL; + return; + } +diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +index 4cb136f07914..5615ffd24e46 100644 +--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h ++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +@@ -473,6 +473,7 @@ struct arm_smmu_cmdq_ent { + + #define CMDQ_OP_ATC_INV 0x40 + #define ATC_INV_SIZE_ALL 52 ++ #define ATC_INV_ADDR_ALL 0x7FFFFFFFFFFFF000UL + struct { + u32 sid; + u32 ssid; +-- +2.17.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/0002-n1sdp-pci_quirk-add-acs-override-for-PCI-devices.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/0002-n1sdp-pci_quirk-add-acs-override-for-PCI-devices.patch new file mode 100644 index 0000000000..a6284098b1 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/0002-n1sdp-pci_quirk-add-acs-override-for-PCI-devices.patch @@ -0,0 
+1,160 @@ +Upstream-Status: Inappropriate [will not be submitted as its a workaround to address hardware issue] +Signed-off-by: Khasim Syed Mohammed <khasim.mohammed@arm.com> + +From e47ab593ee36b2480f8c2196722cded42749629a Mon Sep 17 00:00:00 2001 +From: Manoj Kumar <manoj.kumar3@arm.com> +Date: Tue, 31 Aug 2021 16:15:38 +0000 +Subject: [PATCH 2/5] n1sdp: pci_quirk: add acs override for PCI devices + +Patch taken from: +https://gitlab.com/Queuecumber/linux-acs-override/raw/master/workspaces/5.4/acso.patch + +Change-Id: Ib926bf50524ce9990fbaa2f2f8670fe84bd571f9 +Signed-off-by: Manoj Kumar <manoj.kumar3@arm.com> +--- + .../admin-guide/kernel-parameters.txt | 8 ++ + drivers/pci/quirks.c | 102 ++++++++++++++++++ + 2 files changed, 110 insertions(+) + +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index 43dc35fe5bc0..a60e454854d7 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -3892,6 +3892,14 @@ + nomsi [MSI] If the PCI_MSI kernel config parameter is + enabled, this kernel boot option can be used to + disable the use of MSI interrupts system-wide. ++ pcie_acs_override [PCIE] Override missing PCIe ACS support for ++ downstream ++ All downstream ports - full ACS capabilities ++ multfunction ++ All multifunction devices - multifunction ACS subset ++ id:nnnn:nnnn ++ Specfic device - full ACS capabilities ++ Specified as vid:did (vendor/device ID) in hex + noioapicquirk [APIC] Disable all boot interrupt quirks. + Safety option to keep boot IRQs enabled. This + should never be necessary. 
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index 4537d1ea14fd..984f30d25a6d 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -3588,6 +3588,107 @@ static void quirk_no_bus_reset(struct pci_dev *dev) + dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; + } + ++static bool acs_on_downstream; ++static bool acs_on_multifunction; ++ ++#define NUM_ACS_IDS 16 ++struct acs_on_id { ++ unsigned short vendor; ++ unsigned short device; ++}; ++static struct acs_on_id acs_on_ids[NUM_ACS_IDS]; ++static u8 max_acs_id; ++ ++static __init int pcie_acs_override_setup(char *p) ++{ ++ if (!p) ++ return -EINVAL; ++ ++ while (*p) { ++ if (!strncmp(p, "downstream", 10)) ++ acs_on_downstream = true; ++ if (!strncmp(p, "multifunction", 13)) ++ acs_on_multifunction = true; ++ if (!strncmp(p, "id:", 3)) { ++ char opt[5]; ++ int ret; ++ long val; ++ ++ if (max_acs_id >= NUM_ACS_IDS - 1) { ++ pr_warn("Out of PCIe ACS override slots (%d)\n", ++ NUM_ACS_IDS); ++ goto next; ++ } ++ ++ p += 3; ++ snprintf(opt, 5, "%s", p); ++ ret = kstrtol(opt, 16, &val); ++ if (ret) { ++ pr_warn("PCIe ACS ID parse error %d\n", ret); ++ goto next; ++ } ++ acs_on_ids[max_acs_id].vendor = val; ++ ++ p += strcspn(p, ":"); ++ if (*p != ':') { ++ pr_warn("PCIe ACS invalid ID\n"); ++ goto next; ++ } ++ ++ p++; ++ snprintf(opt, 5, "%s", p); ++ ret = kstrtol(opt, 16, &val); ++ if (ret) { ++ pr_warn("PCIe ACS ID parse error %d\n", ret); ++ goto next; ++ } ++ acs_on_ids[max_acs_id].device = val; ++ max_acs_id++; ++ } ++next: ++ p += strcspn(p, ","); ++ if (*p == ',') ++ p++; ++ } ++ ++ if (acs_on_downstream || acs_on_multifunction || max_acs_id) ++ pr_warn("Warning: PCIe ACS overrides enabled; This may allow non-IOMMU protected peer-to-peer DMA\n"); ++ ++ return 0; ++} ++early_param("pcie_acs_override", pcie_acs_override_setup); ++ ++static int pcie_acs_overrides(struct pci_dev *dev, u16 acs_flags) ++{ ++ int i; ++ ++ /* Never override ACS for legacy devices or devices with ACS caps */ ++ if 
(!pci_is_pcie(dev) || ++ pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS)) ++ return -ENOTTY; ++ ++ for (i = 0; i < max_acs_id; i++) ++ if (acs_on_ids[i].vendor == dev->vendor && ++ acs_on_ids[i].device == dev->device) ++ return 1; ++ ++ switch (pci_pcie_type(dev)) { ++ case PCI_EXP_TYPE_DOWNSTREAM: ++ case PCI_EXP_TYPE_ROOT_PORT: ++ if (acs_on_downstream) ++ return 1; ++ break; ++ case PCI_EXP_TYPE_ENDPOINT: ++ case PCI_EXP_TYPE_UPSTREAM: ++ case PCI_EXP_TYPE_LEG_END: ++ case PCI_EXP_TYPE_RC_END: ++ if (acs_on_multifunction && dev->multifunction) ++ return 1; ++ } ++ ++ return -ENOTTY; ++} ++ + /* + * Some NVIDIA GPU devices do not work with bus reset, SBR needs to be + * prevented for those affected devices. +@@ -4949,6 +5050,7 @@ static const struct pci_dev_acs_enabled { + { PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs }, + /* Zhaoxin Root/Downstream Ports */ + { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs }, ++ { PCI_ANY_ID, PCI_ANY_ID, pcie_acs_overrides }, + { 0 } + }; + +-- +2.17.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/0003-pcie-Add-quirk-for-the-Arm-Neoverse-N1SDP-platform.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/0003-pcie-Add-quirk-for-the-Arm-Neoverse-N1SDP-platform.patch new file mode 100644 index 0000000000..f0184a0b15 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/0003-pcie-Add-quirk-for-the-Arm-Neoverse-N1SDP-platform.patch @@ -0,0 +1,323 @@ +Upstream-Status: Inappropriate [will not be submitted as its a workaround to address hardware issue] +Signed-off-by: Deepak Pandey <Deepak.Pandey@arm.com> + +From 63ee3a71eeb778a632f5683f3b9e404a70760e75 Mon Sep 17 00:00:00 2001 +From: Deepak Pandey <Deepak.Pandey@arm.com> +Date: Fri, 31 May 2019 16:42:43 +0100 +Subject: [PATCH 3/5] pcie: Add quirk for the Arm Neoverse N1SDP platform + +The Arm N1SDP SoC suffers from some PCIe integration issues, most +prominently config 
space accesses to not existing BDFs being answered +with a bus abort, resulting in an SError. +To mitigate this, the firmware scans the bus before boot (catching the +SErrors) and creates a table with valid BDFs, which acts as a filter for +Linux' config space accesses. + +Add code consulting the table as an ACPI PCIe quirk, also register the +corresponding device tree based description of the host controller. +Also fix the other two minor issues on the way, namely not being fully +ECAM compliant and config space accesses being restricted to 32-bit +accesses only. + +This allows the Arm Neoverse N1SDP board to boot Linux without crashing +and to access *any* devices (there are no platform devices except UART). + +Signed-off-by: Deepak Pandey <Deepak.Pandey@arm.com> +[Sudipto: extend to cover the CCIX root port as well] +Signed-off-by: Sudipto Paul <sudipto.paul@arm.com> +[Andre: fix coding style issues, rewrite some parts, add DT support] +Signed-off-by: Andre Przywara <andre.przywara@arm.com> +--- + arch/arm64/configs/defconfig | 1 + + drivers/acpi/pci_mcfg.c | 7 + + drivers/pci/controller/Kconfig | 11 ++ + drivers/pci/controller/Makefile | 1 + + drivers/pci/controller/pcie-n1sdp.c | 198 ++++++++++++++++++++++++++++ + include/linux/pci-ecam.h | 2 + + 6 files changed, 220 insertions(+) + create mode 100644 drivers/pci/controller/pcie-n1sdp.c + +diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig +index 545197bc0501..57ae850ccdf0 100644 +--- a/arch/arm64/configs/defconfig ++++ b/arch/arm64/configs/defconfig +@@ -212,6 +212,7 @@ CONFIG_NFC_NCI=m + CONFIG_NFC_S3FWRN5_I2C=m + CONFIG_PCI=y + CONFIG_PCIEPORTBUS=y ++CONFIG_PCI_QUIRKS=y + CONFIG_PCI_IOV=y + CONFIG_PCI_PASID=y + CONFIG_HOTPLUG_PCI=y +diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c +index 53cab975f612..f31727da21ac 100644 +--- a/drivers/acpi/pci_mcfg.c ++++ b/drivers/acpi/pci_mcfg.c +@@ -169,6 +169,13 @@ static struct mcfg_fixup mcfg_quirks[] = { + ALTRA_ECAM_QUIRK(1, 13), 
+ ALTRA_ECAM_QUIRK(1, 14), + ALTRA_ECAM_QUIRK(1, 15), ++ ++#define N1SDP_ECAM_MCFG(rev, seg, ops) \ ++ {"ARMLTD", "ARMN1SDP", rev, seg, MCFG_BUS_ANY, ops } ++ ++ /* N1SDP SoC with v1 PCIe controller */ ++ N1SDP_ECAM_MCFG(0x20181101, 0, &pci_n1sdp_pcie_ecam_ops), ++ N1SDP_ECAM_MCFG(0x20181101, 1, &pci_n1sdp_ccix_ecam_ops), + }; + + static char mcfg_oem_id[ACPI_OEM_ID_SIZE]; +diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig +index 326f7d13024f..f9700d037c46 100644 +--- a/drivers/pci/controller/Kconfig ++++ b/drivers/pci/controller/Kconfig +@@ -46,6 +46,17 @@ config PCI_IXP4XX + Say Y here if you want support for the PCI host controller found + in the Intel IXP4xx XScale-based network processor SoC. + ++config PCIE_HOST_N1SDP_ECAM ++ bool "ARM N1SDP PCIe Controller" ++ depends on ARM64 ++ depends on OF || (ACPI && PCI_QUIRKS) ++ select PCI_HOST_COMMON ++ default y if ARCH_VEXPRESS ++ help ++ Say Y here if you want PCIe support for the Arm N1SDP platform. ++ The controller is ECAM compliant, but needs a quirk to workaround ++ an integration issue. 
++ + config PCI_TEGRA + bool "NVIDIA Tegra PCIe controller" + depends on ARCH_TEGRA || COMPILE_TEST +diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile +index aaf30b3dcc14..2012ab2b7913 100644 +--- a/drivers/pci/controller/Makefile ++++ b/drivers/pci/controller/Makefile +@@ -37,6 +37,7 @@ obj-$(CONFIG_VMD) += vmd.o + obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o + obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o + obj-$(CONFIG_PCIE_HISI_ERR) += pcie-hisi-error.o ++obj-$(CONFIG_PCIE_HOST_N1SDP_ECAM) += pcie-n1sdp.o + # pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW + obj-y += dwc/ + obj-y += mobiveil/ +diff --git a/drivers/pci/controller/pcie-n1sdp.c b/drivers/pci/controller/pcie-n1sdp.c +new file mode 100644 +index 000000000000..408699b9dcb1 +--- /dev/null ++++ b/drivers/pci/controller/pcie-n1sdp.c +@@ -0,0 +1,198 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2018/2019 ARM Ltd. ++ * ++ * This quirk is to mask the following issues: ++ * - PCIE SLVERR: config space accesses to invalid PCIe BDFs cause a bus ++ * error (signalled as an asynchronous SError) ++ * - MCFG BDF mapping: the root complex is mapped separately from the device ++ * config space ++ * - Non 32-bit accesses to config space are not supported. ++ * ++ * At boot time the SCP board firmware creates a discovery table with ++ * the root complex' base address and the valid BDF values, discovered while ++ * scanning the config space and catching the SErrors. ++ * Linux responds only to the EPs listed in this table, returning NULL ++ * for the rest. ++ */ ++ ++#include <linux/kernel.h> ++#include <linux/init.h> ++#include <linux/ioport.h> ++#include <linux/sizes.h> ++#include <linux/of_pci.h> ++#include <linux/of.h> ++#include <linux/pci-ecam.h> ++#include <linux/platform_device.h> ++#include <linux/module.h> ++ ++#include "../pci.h" ++ ++/* Platform specific values as hardcoded in the firmware. 
*/ ++#define AP_NS_SHARED_MEM_BASE 0x06000000 ++#define MAX_SEGMENTS 2 /* Two PCIe root complexes. */ ++#define BDF_TABLE_SIZE SZ_16K ++ ++/* ++ * Shared memory layout as written by the SCP upon boot time: ++ * ---- ++ * Discover data header --> RC base address ++ * \-> BDF Count ++ * Discover data --> BDF 0...n ++ * ---- ++ */ ++struct pcie_discovery_data { ++ u32 rc_base_addr; ++ u32 nr_bdfs; ++ u32 valid_bdfs[0]; ++} *pcie_discovery_data[MAX_SEGMENTS]; ++ ++void __iomem *rc_remapped_addr[MAX_SEGMENTS]; ++ ++/* ++ * map_bus() is called before we do a config space access for a certain ++ * device. We use this to check whether this device is valid, avoiding ++ * config space accesses which would result in an SError otherwise. ++ */ ++static void __iomem *pci_n1sdp_map_bus(struct pci_bus *bus, unsigned int devfn, ++ int where) ++{ ++ struct pci_config_window *cfg = bus->sysdata; ++ unsigned int devfn_shift = cfg->ops->bus_shift - 8; ++ unsigned int busn = bus->number; ++ unsigned int segment = bus->domain_nr; ++ unsigned int bdf_addr; ++ unsigned int table_count, i; ++ struct pci_dev *dev; ++ ++ if (segment >= MAX_SEGMENTS || ++ busn < cfg->busr.start || busn > cfg->busr.end) ++ return NULL; ++ ++ /* The PCIe root complex has a separate config space mapping. */ ++ if (busn == 0 && devfn == 0) ++ return rc_remapped_addr[segment] + where; ++ ++ dev = pci_get_domain_bus_and_slot(segment, busn, devfn); ++ if (dev && dev->is_virtfn) ++ return pci_ecam_map_bus(bus, devfn, where); ++ ++ /* Accesses beyond the vendor ID always go to existing devices. 
*/ ++ if (where > 0) ++ return pci_ecam_map_bus(bus, devfn, where); ++ ++ busn -= cfg->busr.start; ++ bdf_addr = (busn << cfg->ops->bus_shift) + (devfn << devfn_shift); ++ table_count = pcie_discovery_data[segment]->nr_bdfs; ++ for (i = 0; i < table_count; i++) { ++ if (bdf_addr == pcie_discovery_data[segment]->valid_bdfs[i]) ++ return pci_ecam_map_bus(bus, devfn, where); ++ } ++ ++ return NULL; ++} ++ ++static int pci_n1sdp_init(struct pci_config_window *cfg, unsigned int segment) ++{ ++ phys_addr_t table_base; ++ struct device *dev = cfg->parent; ++ struct pcie_discovery_data *shared_data; ++ size_t bdfs_size; ++ ++ if (segment >= MAX_SEGMENTS) ++ return -ENODEV; ++ ++ table_base = AP_NS_SHARED_MEM_BASE + segment * BDF_TABLE_SIZE; ++ ++ if (!request_mem_region(table_base, BDF_TABLE_SIZE, ++ "PCIe valid BDFs")) { ++ dev_err(dev, "PCIe BDF shared region request failed\n"); ++ return -ENOMEM; ++ } ++ ++ shared_data = devm_ioremap(dev, ++ table_base, BDF_TABLE_SIZE); ++ if (!shared_data) ++ return -ENOMEM; ++ ++ /* Copy the valid BDFs structure to allocated normal memory. */ ++ bdfs_size = sizeof(struct pcie_discovery_data) + ++ sizeof(u32) * shared_data->nr_bdfs; ++ pcie_discovery_data[segment] = devm_kmalloc(dev, bdfs_size, GFP_KERNEL); ++ if (!pcie_discovery_data[segment]) ++ return -ENOMEM; ++ ++ memcpy_fromio(pcie_discovery_data[segment], shared_data, bdfs_size); ++ ++ rc_remapped_addr[segment] = devm_ioremap(dev, ++ shared_data->rc_base_addr, ++ PCI_CFG_SPACE_EXP_SIZE); ++ if (!rc_remapped_addr[segment]) { ++ dev_err(dev, "Cannot remap root port base\n"); ++ return -ENOMEM; ++ } ++ ++ devm_iounmap(dev, shared_data); ++ ++ return 0; ++} ++ ++/* Called for ACPI segment 0, and for all segments when using DT. 
*/ ++static int pci_n1sdp_pcie_init(struct pci_config_window *cfg) ++{ ++ struct platform_device *pdev = to_platform_device(cfg->parent); ++ int segment = 0; ++ ++ if (pdev->dev.of_node) ++ segment = of_get_pci_domain_nr(pdev->dev.of_node); ++ if (segment < 0 || segment > MAX_SEGMENTS) { ++ dev_err(&pdev->dev, "N1SDP PCI controllers require linux,pci-domain property\n"); ++ dev_err(&pdev->dev, "Or invalid segment number, must be smaller than %d\n", ++ MAX_SEGMENTS); ++ return -EINVAL; ++ } ++ ++ return pci_n1sdp_init(cfg, segment); ++} ++ ++/* Called for ACPI segment 1. */ ++static int pci_n1sdp_ccix_init(struct pci_config_window *cfg) ++{ ++ return pci_n1sdp_init(cfg, 1); ++} ++ ++const struct pci_ecam_ops pci_n1sdp_pcie_ecam_ops = { ++ .bus_shift = 20, ++ .init = pci_n1sdp_pcie_init, ++ .pci_ops = { ++ .map_bus = pci_n1sdp_map_bus, ++ .read = pci_generic_config_read32, ++ .write = pci_generic_config_write32, ++ } ++}; ++ ++const struct pci_ecam_ops pci_n1sdp_ccix_ecam_ops = { ++ .bus_shift = 20, ++ .init = pci_n1sdp_ccix_init, ++ .pci_ops = { ++ .map_bus = pci_n1sdp_map_bus, ++ .read = pci_generic_config_read32, ++ .write = pci_generic_config_write32, ++ } ++}; ++ ++static const struct of_device_id n1sdp_pcie_of_match[] = { ++ { .compatible = "arm,n1sdp-pcie", .data = &pci_n1sdp_pcie_ecam_ops }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, n1sdp_pcie_of_match); ++ ++static struct platform_driver n1sdp_pcie_driver = { ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .of_match_table = n1sdp_pcie_of_match, ++ .suppress_bind_attrs = true, ++ }, ++ .probe = pci_host_common_probe, ++}; ++builtin_platform_driver(n1sdp_pcie_driver); +diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h +index adea5a4771cf..e6bbc037cef8 100644 +--- a/include/linux/pci-ecam.h ++++ b/include/linux/pci-ecam.h +@@ -87,6 +87,8 @@ extern const struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 * + extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x 
*/ + extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ + extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */ ++extern const struct pci_ecam_ops pci_n1sdp_pcie_ecam_ops; /* Arm N1SDP PCIe */ ++extern const struct pci_ecam_ops pci_n1sdp_ccix_ecam_ops; /* Arm N1SDP PCIe */ + #endif + + #if IS_ENABLED(CONFIG_PCI_HOST_COMMON) +-- +2.17.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/0004-n1sdp-pcie-add-quirk-support-enabling-remote-chip-PC.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/0004-n1sdp-pcie-add-quirk-support-enabling-remote-chip-PC.patch new file mode 100644 index 0000000000..c15b464c73 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/0004-n1sdp-pcie-add-quirk-support-enabling-remote-chip-PC.patch @@ -0,0 +1,137 @@ +Upstream-Status: Inappropriate [will not be submitted as its an hack required to fix the hardware issue] +Signed-off-by: Sayanta Pattanayak <sayanta.pattanayak@arm.com> + +From 4d69e38213bf52a48f2f0239da8c7b76501428b2 Mon Sep 17 00:00:00 2001 +From: Sayanta Pattanayak <sayanta.pattanayak@arm.com> +Date: Wed, 9 Feb 2022 20:37:43 +0530 +Subject: [PATCH 4/5] n1sdp: pcie: add quirk support enabling remote chip PCIe + +Base address mapping for remote chip Root PCIe ECAM space. + +When two N1SDP boards are coupled via the CCIX connection, the PCI host +complex of the remote board appears as PCIe segment 2 on the primary board. +The resources of the secondary board, including the host complex, are +mapped at offset 0x40000000000 into the address space of the primary +board, so take that into account when accessing the remote PCIe segment. 
+ +Change-Id: I0e8d1eb119aef6444b9df854a39b24441c12195a +Signed-off-by: Sayanta Pattanayak <sayanta.pattanayak@arm.com> +Signed-off-by: Khasim Syed Mohammed <khasim.mohammed@arm.com> +Signed-off-by: Andre Przywara <andre.przywara@arm.com> +Signed-off-by: sahil <sahil@arm.com> +--- + drivers/acpi/pci_mcfg.c | 1 + + drivers/pci/controller/pcie-n1sdp.c | 32 +++++++++++++++++++++++++---- + include/linux/pci-ecam.h | 1 + + 3 files changed, 30 insertions(+), 4 deletions(-) + +diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c +index f31727da21ac..58f59b5fffa2 100644 +--- a/drivers/acpi/pci_mcfg.c ++++ b/drivers/acpi/pci_mcfg.c +@@ -176,6 +176,7 @@ static struct mcfg_fixup mcfg_quirks[] = { + /* N1SDP SoC with v1 PCIe controller */ + N1SDP_ECAM_MCFG(0x20181101, 0, &pci_n1sdp_pcie_ecam_ops), + N1SDP_ECAM_MCFG(0x20181101, 1, &pci_n1sdp_ccix_ecam_ops), ++ N1SDP_ECAM_MCFG(0x20181101, 2, &pci_n1sdp_remote_pcie_ecam_ops), + }; + + static char mcfg_oem_id[ACPI_OEM_ID_SIZE]; +diff --git a/drivers/pci/controller/pcie-n1sdp.c b/drivers/pci/controller/pcie-n1sdp.c +index 408699b9dcb1..a03665dd056a 100644 +--- a/drivers/pci/controller/pcie-n1sdp.c ++++ b/drivers/pci/controller/pcie-n1sdp.c +@@ -30,8 +30,10 @@ + + /* Platform specific values as hardcoded in the firmware. */ + #define AP_NS_SHARED_MEM_BASE 0x06000000 +-#define MAX_SEGMENTS 2 /* Two PCIe root complexes. 
*/ ++/* Two PCIe root complexes in One Chip + One PCIe RC in Remote Chip */ ++#define MAX_SEGMENTS 3 + #define BDF_TABLE_SIZE SZ_16K ++#define REMOTE_CHIP_ADDR_OFFSET 0x40000000000 + + /* + * Shared memory layout as written by the SCP upon boot time: +@@ -97,12 +99,17 @@ static int pci_n1sdp_init(struct pci_config_window *cfg, unsigned int segment) + phys_addr_t table_base; + struct device *dev = cfg->parent; + struct pcie_discovery_data *shared_data; +- size_t bdfs_size; ++ size_t bdfs_size, rc_base_addr = 0; + + if (segment >= MAX_SEGMENTS) + return -ENODEV; + +- table_base = AP_NS_SHARED_MEM_BASE + segment * BDF_TABLE_SIZE; ++ if (segment > 1) { ++ rc_base_addr = REMOTE_CHIP_ADDR_OFFSET; ++ table_base = AP_NS_SHARED_MEM_BASE + REMOTE_CHIP_ADDR_OFFSET; ++ } else { ++ table_base = AP_NS_SHARED_MEM_BASE + segment * BDF_TABLE_SIZE; ++ } + + if (!request_mem_region(table_base, BDF_TABLE_SIZE, + "PCIe valid BDFs")) { +@@ -114,6 +121,7 @@ static int pci_n1sdp_init(struct pci_config_window *cfg, unsigned int segment) + table_base, BDF_TABLE_SIZE); + if (!shared_data) + return -ENOMEM; ++ rc_base_addr += shared_data->rc_base_addr; + + /* Copy the valid BDFs structure to allocated normal memory. */ + bdfs_size = sizeof(struct pcie_discovery_data) + +@@ -125,7 +133,7 @@ static int pci_n1sdp_init(struct pci_config_window *cfg, unsigned int segment) + memcpy_fromio(pcie_discovery_data[segment], shared_data, bdfs_size); + + rc_remapped_addr[segment] = devm_ioremap(dev, +- shared_data->rc_base_addr, ++ rc_base_addr, + PCI_CFG_SPACE_EXP_SIZE); + if (!rc_remapped_addr[segment]) { + dev_err(dev, "Cannot remap root port base\n"); +@@ -161,6 +169,12 @@ static int pci_n1sdp_ccix_init(struct pci_config_window *cfg) + return pci_n1sdp_init(cfg, 1); + } + ++/* Called for ACPI segment 2. 
*/ ++static int pci_n1sdp_remote_pcie_init(struct pci_config_window *cfg) ++{ ++ return pci_n1sdp_init(cfg, 2); ++} ++ + const struct pci_ecam_ops pci_n1sdp_pcie_ecam_ops = { + .bus_shift = 20, + .init = pci_n1sdp_pcie_init, +@@ -181,6 +195,16 @@ const struct pci_ecam_ops pci_n1sdp_ccix_ecam_ops = { + } + }; + ++const struct pci_ecam_ops pci_n1sdp_remote_pcie_ecam_ops = { ++ .bus_shift = 20, ++ .init = pci_n1sdp_remote_pcie_init, ++ .pci_ops = { ++ .map_bus = pci_n1sdp_map_bus, ++ .read = pci_generic_config_read32, ++ .write = pci_generic_config_write32, ++ } ++}; ++ + static const struct of_device_id n1sdp_pcie_of_match[] = { + { .compatible = "arm,n1sdp-pcie", .data = &pci_n1sdp_pcie_ecam_ops }, + { }, +diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h +index e6bbc037cef8..7bd8c1d702ee 100644 +--- a/include/linux/pci-ecam.h ++++ b/include/linux/pci-ecam.h +@@ -89,6 +89,7 @@ extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ + extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */ + extern const struct pci_ecam_ops pci_n1sdp_pcie_ecam_ops; /* Arm N1SDP PCIe */ + extern const struct pci_ecam_ops pci_n1sdp_ccix_ecam_ops; /* Arm N1SDP PCIe */ ++extern const struct pci_ecam_ops pci_n1sdp_remote_pcie_ecam_ops; /* Arm N1SDP PCIe */ + #endif + + #if IS_ENABLED(CONFIG_PCI_HOST_COMMON) +-- +2.17.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/0005-arm64-kpti-Whitelist-early-Arm-Neoverse-N1-revisions.patch b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/0005-arm64-kpti-Whitelist-early-Arm-Neoverse-N1-revisions.patch new file mode 100644 index 0000000000..040408615a --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/0005-arm64-kpti-Whitelist-early-Arm-Neoverse-N1-revisions.patch @@ -0,0 +1,34 @@ +Upstream-Status: Inappropriate +Signed-off-by: Andre Przywara <andre.przywara@arm.com> + +From 
d20f5afffadcdbaca7032f547cce80720d8a414a Mon Sep 17 00:00:00 2001 +From: Andre Przywara <andre.przywara@arm.com> +Date: Fri, 17 May 2019 17:39:27 +0100 +Subject: [PATCH 5/5] arm64: kpti: Whitelist early Arm Neoverse N1 revisions + +Early revisions (r1p0) of the Neoverse N1 core did not feature the +CSV3 field in ID_AA64PFR0_EL1 to advertise they are not affected by +the Spectre variant 3 (aka Meltdown) vulnerability. + +Add this particular revision to the whitelist to avoid enabling KPTI. + +Signed-off-by: Andre Przywara <andre.przywara@arm.com> +--- + arch/arm64/kernel/cpufeature.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c +index 6ec7036ef7e1..ceba98773608 100644 +--- a/arch/arm64/kernel/cpufeature.c ++++ b/arch/arm64/kernel/cpufeature.c +@@ -1509,6 +1509,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER), ++ MIDR_REV(MIDR_NEOVERSE_N1, 1, 0), /* missing CSV3 */ + { /* sentinel */ } + }; + char const *str = "kpti command line option"; +-- +2.17.1 + diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/enable-realtek-R8169.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/enable-realtek-R8169.cfg new file mode 100644 index 0000000000..7a5747407c --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/enable-realtek-R8169.cfg @@ -0,0 +1,3 @@ +# Enable Realtek Gigabit Ethernet adapter +CONFIG_REALTEK_PHY=y +CONFIG_R8169=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/enable-usb_conn_gpio.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/enable-usb_conn_gpio.cfg new file mode 100644 index 0000000000..128c902710 --- /dev/null +++ 
b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/enable-usb_conn_gpio.cfg @@ -0,0 +1,2 @@ +# PHY_TEGRA_XUSB sets this to y, but its set as m in defconfig +CONFIG_USB_CONN_GPIO=y diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/usb_xhci_pci_renesas.cfg b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/usb_xhci_pci_renesas.cfg new file mode 100644 index 0000000000..c06507c060 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-5.15/n1sdp/usb_xhci_pci_renesas.cfg @@ -0,0 +1,2 @@ +# CONFIG_USB_XHCI_PCI is not set +# CONFIG_USB_XHCI_PCI_RENESAS is not set diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-rt_%.bbappend b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-rt_%.bbappend new file mode 100644 index 0000000000..8994c241ec --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto-rt_%.bbappend @@ -0,0 +1,6 @@ +# Only enable linux-yocto-rt for n1sdp and the Armv8-R AArch64 AEM FVP +LINUX_YOCTO_RT_REQUIRE ?= "" +LINUX_YOCTO_RT_REQUIRE:n1sdp = "linux-arm-platforms.inc" +LINUX_YOCTO_RT_REQUIRE:fvp-baser-aemv8r64 = "linux-arm-platforms.inc" + +require ${LINUX_YOCTO_RT_REQUIRE} diff --git a/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto_%.bbappend b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto_%.bbappend new file mode 100644 index 0000000000..db850eaba8 --- /dev/null +++ b/meta-arm/meta-arm-bsp/recipes-kernel/linux/linux-yocto_%.bbappend @@ -0,0 +1,3 @@ +# Add support for Arm Platforms (boards or simulators) + +require linux-arm-platforms.inc |