From 89bbe4c798bc3a43c882179adb5222c1a972ac70 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 11 Jun 2020 22:11:19 +1000 Subject: powerpc/64: indirect function call use bctrl rather than blrl in ret_from_kernel_thread blrl is not recommended to use as an indirect function call, as it may corrupt the link stack predictor. This is not a performance critical path but this should be fixed for consistency. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200611121119.1015740-1-npiggin@gmail.com --- arch/powerpc/kernel/entry_64.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 9d49338e0c85..1013adc9acfb 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -233,12 +233,12 @@ _GLOBAL(ret_from_fork) _GLOBAL(ret_from_kernel_thread) bl schedule_tail REST_NVGPRS(r1) - mtlr r14 + mtctr r14 mr r3,r15 #ifdef PPC64_ELF_ABI_v2 mr r12,r14 #endif - blrl + bctrl li r3,0 b .Lsyscall_exit -- cgit v1.2.3 From f39eb5d8ac707fd59029a06c3f985f29b1aaa26b Mon Sep 17 00:00:00 2001 From: Murilo Opsfelder Araujo Date: Wed, 10 Jun 2020 18:51:12 -0300 Subject: powerpc/dt_cpu_ftrs: Remove unused macro ISA_V2_07B Macro ISA_V2_07B is defined but not used anywhere else in the code. Signed-off-by: Murilo Opsfelder Araujo Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200610215114.167544-2-muriloo@linux.ibm.com --- arch/powerpc/kernel/dt_cpu_ftrs.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 3a409517c031..1b7da8b5ce0d 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -24,7 +24,6 @@ /* Device-tree visible constants follow */ -#define ISA_V2_07B 2070 #define ISA_V3_0B 3000 #define ISA_V3_1 3100 -- cgit v1.2.3 From e781f12a60a7bddb50909d42478cca8724c8b113 Mon Sep 17 00:00:00 2001 From: Murilo Opsfelder Araujo Date: Wed, 10 Jun 2020 18:51:13 -0300 Subject: powerpc/dt_cpu_ftrs: Make use of macro ISA_V3_0B Macro ISA_V3_0B was defined but never used. Use it instead of literal. Signed-off-by: Murilo Opsfelder Araujo Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200610215114.167544-3-muriloo@linux.ibm.com --- arch/powerpc/kernel/dt_cpu_ftrs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 1b7da8b5ce0d..9d6e833da2bd 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -673,7 +673,7 @@ static void __init cpufeatures_setup_start(u32 isa) { pr_info("setup for ISA %d\n", isa); - if (isa >= 3000) { + if (isa >= ISA_V3_0B) { cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300; cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00; } -- cgit v1.2.3 From 7714394706c0309b3f3fc474b390463d60eb6cb1 Mon Sep 17 00:00:00 2001 From: Murilo Opsfelder Araujo Date: Wed, 10 Jun 2020 18:51:14 -0300 Subject: powerpc/dt_cpu_ftrs: Make use of macro ISA_V3_1 Macro ISA_V3_1 was defined but never used. Use it instead of literal. 
Signed-off-by: Murilo Opsfelder Araujo Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200610215114.167544-4-muriloo@linux.ibm.com --- arch/powerpc/kernel/dt_cpu_ftrs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 9d6e833da2bd..a0edeb391e3e 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -678,7 +678,7 @@ static void __init cpufeatures_setup_start(u32 isa) cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00; } - if (isa >= 3100) { + if (isa >= ISA_V3_1) { cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_31; cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_1; } -- cgit v1.2.3 From f134a7cef1d7de45fab028a945b7f34c615718e1 Mon Sep 17 00:00:00 2001 From: Chris Packham Date: Fri, 12 Jun 2020 10:42:19 +1200 Subject: powerpc: Remove inaccessible CMDLINE default Since commit cbe46bd4f510 ("powerpc: remove CONFIG_CMDLINE #ifdef mess") CONFIG_CMDLINE has always had a value regardless of CONFIG_CMDLINE_BOOL. For example: $ make ARCH=powerpc defconfig $ cat .config # CONFIG_CMDLINE_BOOL is not set CONFIG_CMDLINE="" When enabling CONFIG_CMDLINE_BOOL this value is kept making the 'default "..." if CONFIG_CMDLINE_BOOL' ineffective. $ ./scripts/config --enable CONFIG_CMDLINE_BOOL $ cat .config CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="" Remove CONFIG_CMDLINE_BOOL and the inaccessible default. Signed-off-by: Chris Packham Reviewed-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200611224220.25066-2-chris.packham@alliedtelesis.co.nz --- arch/powerpc/Kconfig | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 9fa23eb320ff..51abc59c3334 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -859,12 +859,8 @@ config PPC_DENORMALISATION Add support for handling denormalisation of single precision values. Useful for bare metal only. If unsure say Y here. -config CMDLINE_BOOL - bool "Default bootloader kernel arguments" - config CMDLINE - string "Initial kernel command string" if CMDLINE_BOOL - default "console=ttyS0,9600 console=tty0 root=/dev/sda2" if CMDLINE_BOOL + string "Initial kernel command string" default "" help On some platforms, there is currently no way for the boot loader to -- cgit v1.2.3 From 0488d32530ecff973c40f2592a6eab2907d0a5cc Mon Sep 17 00:00:00 2001 From: Chris Packham Date: Fri, 12 Jun 2020 10:42:20 +1200 Subject: powerpc/configs: Remove CMDLINE_BOOL Regenerate defconfigs to remove CONFIG_CMDLINE_BOOL and the default CONFIG_CMDLINE where applicable. 
Signed-off-by: Chris Packham Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200611224220.25066-3-chris.packham@alliedtelesis.co.nz --- arch/powerpc/configs/44x/akebono_defconfig | 2 -- arch/powerpc/configs/44x/arches_defconfig | 2 -- arch/powerpc/configs/44x/bamboo_defconfig | 2 -- arch/powerpc/configs/44x/bluestone_defconfig | 2 -- arch/powerpc/configs/44x/canyonlands_defconfig | 2 -- arch/powerpc/configs/44x/currituck_defconfig | 2 -- arch/powerpc/configs/44x/eiger_defconfig | 2 -- arch/powerpc/configs/44x/fsp2_defconfig | 1 - arch/powerpc/configs/44x/icon_defconfig | 2 -- arch/powerpc/configs/44x/iss476-smp_defconfig | 1 - arch/powerpc/configs/44x/katmai_defconfig | 2 -- arch/powerpc/configs/44x/rainier_defconfig | 2 -- arch/powerpc/configs/44x/redwood_defconfig | 2 -- arch/powerpc/configs/44x/sam440ep_defconfig | 2 -- arch/powerpc/configs/44x/sequoia_defconfig | 2 -- arch/powerpc/configs/44x/taishan_defconfig | 2 -- arch/powerpc/configs/44x/warp_defconfig | 1 - arch/powerpc/configs/holly_defconfig | 1 - arch/powerpc/configs/mvme5100_defconfig | 3 +-- arch/powerpc/configs/ps3_defconfig | 2 -- arch/powerpc/configs/skiroot_defconfig | 1 - arch/powerpc/configs/storcenter_defconfig | 1 - 22 files changed, 1 insertion(+), 38 deletions(-) diff --git a/arch/powerpc/configs/44x/akebono_defconfig b/arch/powerpc/configs/44x/akebono_defconfig index 7705a5c3f4ea..60d5fa2c3b93 100644 --- a/arch/powerpc/configs/44x/akebono_defconfig +++ b/arch/powerpc/configs/44x/akebono_defconfig @@ -19,8 +19,6 @@ CONFIG_HIGHMEM=y CONFIG_HZ_100=y CONFIG_IRQ_ALL_CPUS=y # CONFIG_COMPACTION is not set -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" # CONFIG_SUSPEND is not set CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/powerpc/configs/44x/arches_defconfig b/arch/powerpc/configs/44x/arches_defconfig index 82c6f49b8dcb..41d04e70d4fb 100644 --- a/arch/powerpc/configs/44x/arches_defconfig +++ b/arch/powerpc/configs/44x/arches_defconfig @@ -11,8 +11,6 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_EBONY is not set CONFIG_ARCHES=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/powerpc/configs/44x/bamboo_defconfig b/arch/powerpc/configs/44x/bamboo_defconfig index 679213214a75..acbce718eaa8 100644 --- a/arch/powerpc/configs/44x/bamboo_defconfig +++ b/arch/powerpc/configs/44x/bamboo_defconfig @@ -9,8 +9,6 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set CONFIG_BAMBOO=y # CONFIG_EBONY is not set -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/powerpc/configs/44x/bluestone_defconfig b/arch/powerpc/configs/44x/bluestone_defconfig index 8006a5728afd..37088f250c9e 100644 --- a/arch/powerpc/configs/44x/bluestone_defconfig +++ b/arch/powerpc/configs/44x/bluestone_defconfig @@ -11,8 +11,6 @@ CONFIG_EXPERT=y # CONFIG_COMPAT_BRK is not set CONFIG_BLUESTONE=y # CONFIG_EBONY is not set -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/powerpc/configs/44x/canyonlands_defconfig b/arch/powerpc/configs/44x/canyonlands_defconfig index ccc14eb7a2f1..61776ade572b 100644 --- a/arch/powerpc/configs/44x/canyonlands_defconfig +++ b/arch/powerpc/configs/44x/canyonlands_defconfig @@ -11,8 +11,6 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_EBONY is not set CONFIG_CANYONLANDS=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/powerpc/configs/44x/currituck_defconfig 
b/arch/powerpc/configs/44x/currituck_defconfig index be76e066df01..34c86b3abecb 100644 --- a/arch/powerpc/configs/44x/currituck_defconfig +++ b/arch/powerpc/configs/44x/currituck_defconfig @@ -17,8 +17,6 @@ CONFIG_HIGHMEM=y CONFIG_HZ_100=y CONFIG_MATH_EMULATION=y CONFIG_IRQ_ALL_CPUS=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" # CONFIG_SUSPEND is not set CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/powerpc/configs/44x/eiger_defconfig b/arch/powerpc/configs/44x/eiger_defconfig index 1abaa63e067f..509300f400e2 100644 --- a/arch/powerpc/configs/44x/eiger_defconfig +++ b/arch/powerpc/configs/44x/eiger_defconfig @@ -10,8 +10,6 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_EBONY is not set CONFIG_EIGER=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" CONFIG_PCIEPORTBUS=y # CONFIG_PCIEASPM is not set CONFIG_NET=y diff --git a/arch/powerpc/configs/44x/fsp2_defconfig b/arch/powerpc/configs/44x/fsp2_defconfig index e67fc041ca3e..30845ce0885a 100644 --- a/arch/powerpc/configs/44x/fsp2_defconfig +++ b/arch/powerpc/configs/44x/fsp2_defconfig @@ -28,7 +28,6 @@ CONFIG_476FPE_ERR46=y CONFIG_SWIOTLB=y CONFIG_KEXEC=y CONFIG_CRASH_DUMP=y -CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="ip=on rw" # CONFIG_SUSPEND is not set # CONFIG_PCI is not set diff --git a/arch/powerpc/configs/44x/icon_defconfig b/arch/powerpc/configs/44x/icon_defconfig index 7d7ff84c8200..930948a1da76 100644 --- a/arch/powerpc/configs/44x/icon_defconfig +++ b/arch/powerpc/configs/44x/icon_defconfig @@ -9,8 +9,6 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_EBONY is not set CONFIG_ICON=y CONFIG_HIGHMEM=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" CONFIG_PCIEPORTBUS=y # CONFIG_PCIEASPM is not set CONFIG_NET=y diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig index fb5c73a29bf4..2c3834eebca3 100644 --- a/arch/powerpc/configs/44x/iss476-smp_defconfig +++ b/arch/powerpc/configs/44x/iss476-smp_defconfig @@ -17,7 +17,6 @@ CONFIG_ISS4xx=y CONFIG_HZ_100=y CONFIG_MATH_EMULATION=y CONFIG_IRQ_ALL_CPUS=y -CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="root=/dev/issblk0" # CONFIG_PCI is not set CONFIG_ADVANCED_OPTIONS=y diff --git a/arch/powerpc/configs/44x/katmai_defconfig b/arch/powerpc/configs/44x/katmai_defconfig index c6dc1445fc04..1a0f1c3e0ee9 100644 --- a/arch/powerpc/configs/44x/katmai_defconfig +++ b/arch/powerpc/configs/44x/katmai_defconfig @@ -9,8 +9,6 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_EBONY is not set CONFIG_KATMAI=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/powerpc/configs/44x/rainier_defconfig b/arch/powerpc/configs/44x/rainier_defconfig index c83ad03182df..6dd67de06a0b 100644 --- a/arch/powerpc/configs/44x/rainier_defconfig +++ b/arch/powerpc/configs/44x/rainier_defconfig @@ -10,8 +10,6 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_EBONY is not set CONFIG_RAINIER=y CONFIG_MATH_EMULATION=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/powerpc/configs/44x/redwood_defconfig b/arch/powerpc/configs/44x/redwood_defconfig index 640fe1d5af28..e28d76416537 100644 --- a/arch/powerpc/configs/44x/redwood_defconfig +++ b/arch/powerpc/configs/44x/redwood_defconfig @@ -10,8 +10,6 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_EBONY is not set CONFIG_REDWOOD=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" CONFIG_PCIEPORTBUS=y # CONFIG_PCIEASPM is not set CONFIG_NET=y diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig index 
22dc0dadf576..ef09786d49b9 100644 --- a/arch/powerpc/configs/44x/sam440ep_defconfig +++ b/arch/powerpc/configs/44x/sam440ep_defconfig @@ -12,8 +12,6 @@ CONFIG_PARTITION_ADVANCED=y CONFIG_AMIGA_PARTITION=y # CONFIG_EBONY is not set CONFIG_SAM440EP=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/powerpc/configs/44x/sequoia_defconfig b/arch/powerpc/configs/44x/sequoia_defconfig index 2c0973db8837..b4984eab43eb 100644 --- a/arch/powerpc/configs/44x/sequoia_defconfig +++ b/arch/powerpc/configs/44x/sequoia_defconfig @@ -11,8 +11,6 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_EBONY is not set CONFIG_SEQUOIA=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/powerpc/configs/44x/taishan_defconfig b/arch/powerpc/configs/44x/taishan_defconfig index a2d355ca62b2..3ea5932ab852 100644 --- a/arch/powerpc/configs/44x/taishan_defconfig +++ b/arch/powerpc/configs/44x/taishan_defconfig @@ -9,8 +9,6 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_EBONY is not set CONFIG_TAISHAN=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig index af66c69c49fe..47252c2d7669 100644 --- a/arch/powerpc/configs/44x/warp_defconfig +++ b/arch/powerpc/configs/44x/warp_defconfig @@ -14,7 +14,6 @@ CONFIG_MODULE_UNLOAD=y CONFIG_WARP=y CONFIG_PPC4xx_GPIO=y CONFIG_HZ_1000=y -CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="ip=on" # CONFIG_PCI is not set CONFIG_NET=y diff --git a/arch/powerpc/configs/holly_defconfig b/arch/powerpc/configs/holly_defconfig index 067f433c8f5e..271daff47d1d 100644 --- a/arch/powerpc/configs/holly_defconfig +++ b/arch/powerpc/configs/holly_defconfig @@ -13,7 +13,6 @@ CONFIG_EMBEDDED6xx=y CONFIG_PPC_HOLLY=y CONFIG_GEN_RTC=y CONFIG_BINFMT_MISC=y -CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="console=ttyS0,115200" # CONFIG_SECCOMP is not set CONFIG_NET=y diff --git a/arch/powerpc/configs/mvme5100_defconfig b/arch/powerpc/configs/mvme5100_defconfig index 0a0d046fc445..3d53d69ed36c 100644 --- a/arch/powerpc/configs/mvme5100_defconfig +++ b/arch/powerpc/configs/mvme5100_defconfig @@ -20,10 +20,9 @@ CONFIG_EMBEDDED6xx=y CONFIG_MVME5100=y CONFIG_KVM_GUEST=y CONFIG_HZ_100=y +CONFIG_CMDLINE="console=ttyS0,9600 ip=dhcp root=/dev/nfs" # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_COMPACTION is not set -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="console=ttyS0,9600 ip=dhcp root=/dev/nfs" CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig index 81b55c880fc3..142f1321fa58 100644 --- a/arch/powerpc/configs/ps3_defconfig +++ b/arch/powerpc/configs/ps3_defconfig @@ -34,8 +34,6 @@ CONFIG_KEXEC=y # CONFIG_SPARSEMEM_VMEMMAP is not set # CONFIG_COMPACTION is not set CONFIG_SCHED_SMT=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" CONFIG_PM=y CONFIG_PM_DEBUG=y # CONFIG_SECCOMP is not set diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig index ad6739ac63dc..b806a5d3a695 100644 --- a/arch/powerpc/configs/skiroot_defconfig +++ b/arch/powerpc/configs/skiroot_defconfig @@ -45,7 +45,6 @@ CONFIG_IRQ_ALL_CPUS=y CONFIG_NUMA=y CONFIG_PPC_64K_PAGES=y CONFIG_SCHED_SMT=y -CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="console=tty0 console=hvc0 ipr.fast_reboot=1 quiet" # CONFIG_SECCOMP is not set # CONFIG_PPC_MEM_KEYS is not set diff --git 
a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig index b964084e4056..47dcfaddc1ac 100644 --- a/arch/powerpc/configs/storcenter_defconfig +++ b/arch/powerpc/configs/storcenter_defconfig @@ -12,7 +12,6 @@ CONFIG_EMBEDDED6xx=y CONFIG_STORCENTER=y CONFIG_HZ_100=y CONFIG_BINFMT_MISC=y -CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="console=ttyS0,115200" # CONFIG_SECCOMP is not set CONFIG_NET=y -- cgit v1.2.3 From f0993c839e95dd6c7f054a1015e693c87e33e4fb Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Fri, 12 Jun 2020 14:33:03 +1000 Subject: powerpc/xive: Ignore kmemleak false positives xive_native_provision_pages() allocates memory and passes the pointer to OPAL so kmemleak cannot find the pointer usage in the kernel memory and produces a false positive report (below) (even if the kernel did scan OPAL memory, it is unable to deal with __pa() addresses anyway). This silences the warning. unreferenced object 0xc000200350c40000 (size 65536): comm "qemu-system-ppc", pid 2725, jiffies 4294946414 (age 70776.530s) hex dump (first 32 bytes): 02 00 00 00 50 00 00 00 00 00 00 00 00 00 00 00 ....P........... 01 00 08 07 00 00 00 00 00 00 00 00 00 00 00 00 ................ backtrace: [<0000000081ff046c>] xive_native_alloc_vp_block+0x120/0x250 [<00000000d555d524>] kvmppc_xive_compute_vp_id+0x248/0x350 [kvm] [<00000000d69b9c9f>] kvmppc_xive_connect_vcpu+0xc0/0x520 [kvm] [<000000006acbc81c>] kvm_arch_vcpu_ioctl+0x308/0x580 [kvm] [<0000000089c69580>] kvm_vcpu_ioctl+0x19c/0xae0 [kvm] [<00000000902ae91e>] ksys_ioctl+0x184/0x1b0 [<00000000f3e68bd7>] sys_ioctl+0x48/0xb0 [<0000000001b2c127>] system_call_exception+0x124/0x1f0 [<00000000d2b2ee40>] system_call_common+0xe8/0x214 Signed-off-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612043303.84894-1-aik@ozlabs.ru --- arch/powerpc/sysdev/xive/native.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index 71b881e554fc..cb58ec7ce77a 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -647,6 +648,7 @@ static bool xive_native_provision_pages(void) pr_err("Failed to allocate provisioning page\n"); return false; } + kmemleak_ignore(p); opal_xive_donate_page(chip, __pa(p)); } return true; -- cgit v1.2.3 From 55bd9ac468397c4f12a33b7ec714b5d0362c3aa2 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Fri, 5 Jun 2020 07:18:06 -0700 Subject: powerpc/mm: Fix typo in IS_ENABLED() IS_ENABLED() matches names exactly, so the missing "CONFIG_" prefix means this code would never be built. Also fixes a missing newline in pr_warn(). Fixes: 970d54f99cea ("powerpc/book3s64/hash: Disable 16M linear mapping size if not aligned") Signed-off-by: Joe Perches Signed-off-by: Kees Cook Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/202006050717.A2F9809E@keescook --- arch/powerpc/mm/book3s64/hash_utils.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 468169e33c86..084287fb2908 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -663,11 +663,10 @@ static void __init htab_init_page_sizes(void) * Pick a size for the linear mapping. 
Currently, we only * support 16M, 1M and 4K which is the default */ - if (IS_ENABLED(STRICT_KERNEL_RWX) && + if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && (unsigned long)_stext % 0x1000000) { if (mmu_psize_defs[MMU_PAGE_16M].shift) - pr_warn("Kernel not 16M aligned, " - "disabling 16M linear map alignment"); + pr_warn("Kernel not 16M aligned, disabling 16M linear map alignment\n"); aligned = false; } -- cgit v1.2.3 From 7c466b0807960edc13e4b855be85ea765df9a6cd Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 15 Jun 2020 13:18:39 +0000 Subject: powerpc/ptdump: Fix build failure in hashpagetable.c H_SUCCESS is only defined when CONFIG_PPC_PSERIES is defined. != H_SUCCESS means != 0. Modify the test accordingly. Fixes: 65e701b2d2a8 ("powerpc/ptdump: drop non vital #ifdefs") Cc: stable@vger.kernel.org Reported-by: kernel test robot Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/795158fc1d2b3dff3bf7347881947a887ea9391a.1592227105.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/ptdump/hashpagetable.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/mm/ptdump/hashpagetable.c b/arch/powerpc/mm/ptdump/hashpagetable.c index a2c33efc7ce8..5b8bd34cd3a1 100644 --- a/arch/powerpc/mm/ptdump/hashpagetable.c +++ b/arch/powerpc/mm/ptdump/hashpagetable.c @@ -258,7 +258,7 @@ static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 * for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) { lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes); - if (lpar_rc != H_SUCCESS) + if (lpar_rc) continue; for (j = 0; j < 4; j++) { if (HPTE_V_COMPARE(ptes[j].v, want_v) && -- cgit v1.2.3 From 178748b6d14946f080d49bc7dcc47b7cc4437e4d Mon Sep 17 00:00:00 2001 From: Satheesh Rajendran Date: Fri, 12 Jun 2020 19:59:53 +0530 Subject: powerpc/pseries/svm: Drop unused align argument in alloc_shared_lppaca() function Argument "align" in alloc_shared_lppaca() was unused inside the function. Let's drop it and update code comment for page alignment. Signed-off-by: Satheesh Rajendran Reviewed-by: Thiago Jung Bauermann Reviewed-by: Laurent Dufour [mpe: Massage comment wording/formatting] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612142953.135408-1-sathnaga@linux.vnet.ibm.com --- arch/powerpc/kernel/paca.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 2168372b792d..5239a421b9df 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -57,8 +57,8 @@ static void *__init alloc_paca_data(unsigned long size, unsigned long align, #define LPPACA_SIZE 0x400 -static void *__init alloc_shared_lppaca(unsigned long size, unsigned long align, - unsigned long limit, int cpu) +static void *__init alloc_shared_lppaca(unsigned long size, unsigned long limit, + int cpu) { size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE); static unsigned long shared_lppaca_size; @@ -68,6 +68,13 @@ static void *__init alloc_shared_lppaca(unsigned long size, unsigned long align, if (!shared_lppaca) { memblock_set_bottom_up(true); + /* + * See Documentation/powerpc/ultravisor.rst for more details. + * + * UV/HV data sharing is in PAGE_SIZE granularity. In order to + * minimize the number of pages shared, align the allocation to + * PAGE_SIZE. 
+ */ shared_lppaca = memblock_alloc_try_nid(shared_lppaca_total_size, PAGE_SIZE, MEMBLOCK_LOW_LIMIT, @@ -122,7 +129,7 @@ static struct lppaca * __init new_lppaca(int cpu, unsigned long limit) return NULL; if (is_secure_guest()) - lp = alloc_shared_lppaca(LPPACA_SIZE, 0x400, limit, cpu); + lp = alloc_shared_lppaca(LPPACA_SIZE, limit, cpu); else lp = alloc_paca_data(LPPACA_SIZE, 0x400, limit, cpu); -- cgit v1.2.3 From 5f202c1a1d429bee3ddab647711f181c72d157da Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Wed, 17 Jun 2020 10:38:35 +1000 Subject: powerpc/powernv/ioda: Return correct error if TCE level allocation failed The iommu_table_ops::xchg_no_kill() callback updates TCE. It is quite possible that not entire table is allocated if it is huge and multilevel so xchg may also allocate subtables. If failed, it returns H_HARDWARE for failed allocation and H_TOO_HARD if it needs it but cannot do because the alloc parameter is "false" (set when called with MMU=off to force retry with MMU=on). The problem is that having separate errors only matters in real mode (MMU=off) but the only caller with alloc="false" does not check the exact error code and simply returns H_TOO_HARD; and for every other mode alloc is "true". Also, the function is also called from the ioctl() handler of the VFIO SPAPR TCE IOMMU subdriver which does not expect hypervisor error codes (H_xxx) and will expose them to the userspace. This converts wrong error codes to -ENOMEM. Signed-off-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200617003835.48831-1-aik@ozlabs.ru --- arch/powerpc/platforms/powernv/pci-ioda-tce.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c index f923359d8afc..5218f5da2737 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c +++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c @@ -166,7 +166,7 @@ int pnv_tce_xchg(struct iommu_table *tbl, long index, if (!ptce) { ptce = pnv_tce(tbl, false, idx, alloc); if (!ptce) - return alloc ? H_HARDWARE : H_TOO_HARD; + return -ENOMEM; } if (newtce & TCE_PCI_WRITE) -- cgit v1.2.3 From 01bd294642841998c76d9e6929597dcb7da9466a Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 20 May 2020 12:11:02 +1000 Subject: selftests/powerpc: Allow choice of CI memory location in alignment_handler test The alignment handler selftest needs cache-inhibited memory and currently /dev/fb0 is relied on to provided this. This prevents running the test on systems without /dev/fb0 (e.g., mambo). Read the commandline arguments for an optional path to be used instead, as well as an optional offset to be for mmaping this path. Signed-off-by: Jordan Niethe Tested-by: Alistair Popple Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200520021103.19798-1-jniethe5@gmail.com --- .../powerpc/alignment/alignment_handler.c | 63 ++++++++++++++-------- 1 file changed, 42 insertions(+), 21 deletions(-) diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c index 0453c50c949c..eb6aba323f8b 100644 --- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c +++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c @@ -9,7 +9,17 @@ * This selftest exercises the powerpc alignment fault handler. 
* * We create two sets of source and destination buffers, one in regular memory, - * the other cache-inhibited (we use /dev/fb0 for this). + * the other cache-inhibited (by default we use /dev/fb0 for this, but an + * alterative path for cache-inhibited memory may be provided). + * + * One way to get cache-inhibited memory is to use the "mem" kernel parameter + * to limit the kernel to less memory than actually exists. Addresses above + * the limit may still be accessed but will be treated as cache-inhibited. For + * example, if there is actually 4GB of memory and the parameter "mem=3GB" is + * used, memory from address 0xC0000000 onwards is treated as cache-inhibited. + * To access this region /dev/mem is used. The kernel should be configured + * without CONFIG_STRICT_DEVMEM. In this case use: + * ./alignment_handler /dev/mem 0xc0000000 * * We initialise the source buffers, then use whichever set of load/store * instructions is under test to copy bytes from the source buffers to the @@ -53,6 +63,8 @@ int bufsize; int debug; int testing; volatile int gotsig; +char *cipath = "/dev/fb0"; +long cioffset; void sighandler(int sig, siginfo_t *info, void *ctx) { @@ -195,17 +207,18 @@ int do_test(char *test_name, void (*test_func)(char *, char *)) printf("\tDoing %s:\t", test_name); - fd = open("/dev/fb0", O_RDWR); + fd = open(cipath, O_RDWR); if (fd < 0) { printf("\n"); - perror("Can't open /dev/fb0 now?"); + perror("Can't open ci file now?"); return 1; } - ci0 = mmap(NULL, bufsize, PROT_WRITE, MAP_SHARED, - fd, 0x0); - ci1 = mmap(NULL, bufsize, PROT_WRITE, MAP_SHARED, - fd, bufsize); + ci0 = mmap(NULL, bufsize, PROT_WRITE | PROT_READ, MAP_SHARED, + fd, cioffset); + ci1 = mmap(NULL, bufsize, PROT_WRITE | PROT_READ, MAP_SHARED, + fd, cioffset + bufsize); + if ((ci0 == MAP_FAILED) || (ci1 == MAP_FAILED)) { printf("\n"); perror("mmap failed"); @@ -270,11 +283,11 @@ int do_test(char *test_name, void (*test_func)(char *, char *)) return rc; } -static bool can_open_fb0(void) +static bool can_open_cifile(void) { int fd; - fd = open("/dev/fb0", O_RDWR); + fd = open(cipath, O_RDWR); if (fd < 0) return false; @@ -286,7 +299,7 @@ int test_alignment_handler_vsx_206(void) { int rc = 0; - SKIP_IF(!can_open_fb0()); + SKIP_IF(!can_open_cifile()); SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06)); printf("VSX: 2.06B\n"); @@ -304,7 +317,7 @@ int test_alignment_handler_vsx_207(void) { int rc = 0; - SKIP_IF(!can_open_fb0()); + SKIP_IF(!can_open_cifile()); SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07)); printf("VSX: 2.07B\n"); @@ -320,7 +333,7 @@ int test_alignment_handler_vsx_300(void) { int rc = 0; - SKIP_IF(!can_open_fb0()); + SKIP_IF(!can_open_cifile()); SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_00)); printf("VSX: 3.00B\n"); @@ -352,7 +365,7 @@ int test_alignment_handler_integer(void) { int rc = 0; - SKIP_IF(!can_open_fb0()); + SKIP_IF(!can_open_cifile()); printf("Integer\n"); LOAD_DFORM_TEST(lbz); @@ -408,7 +421,7 @@ int test_alignment_handler_integer_206(void) { int rc = 0; - SKIP_IF(!can_open_fb0()); + SKIP_IF(!can_open_cifile()); SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06)); printf("Integer: 2.06\n"); @@ -423,7 +436,7 @@ int test_alignment_handler_vmx(void) { int rc = 0; - SKIP_IF(!can_open_fb0()); + SKIP_IF(!can_open_cifile()); SKIP_IF(!have_hwcap(PPC_FEATURE_HAS_ALTIVEC)); printf("VMX\n"); @@ -451,7 +464,7 @@ int test_alignment_handler_fp(void) { int rc = 0; - SKIP_IF(!can_open_fb0()); + SKIP_IF(!can_open_cifile()); printf("Floating point\n"); LOAD_FLOAT_DFORM_TEST(lfd); @@ -479,7 +492,7 @@ int 
test_alignment_handler_fp_205(void) { int rc = 0; - SKIP_IF(!can_open_fb0()); + SKIP_IF(!can_open_cifile()); SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_05)); printf("Floating point: 2.05\n"); @@ -497,7 +510,7 @@ int test_alignment_handler_fp_206(void) { int rc = 0; - SKIP_IF(!can_open_fb0()); + SKIP_IF(!can_open_cifile()); SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06)); printf("Floating point: 2.06\n"); @@ -509,11 +522,12 @@ int test_alignment_handler_fp_206(void) void usage(char *prog) { - printf("Usage: %s [options]\n", prog); + printf("Usage: %s [options] [path [offset]]\n", prog); printf(" -d Enable debug error output\n"); printf("\n"); - printf("This test requires a POWER8 or POWER9 CPU and a usable "); - printf("framebuffer at /dev/fb0.\n"); + printf("This test requires a POWER8 or POWER9 CPU and either a "); + printf("usable framebuffer at /dev/fb0 or the path to usable "); + printf("cache-inhibited memory and optional offset to be provided\n"); } int main(int argc, char *argv[]) @@ -533,6 +547,13 @@ int main(int argc, char *argv[]) exit(1); } } + argc -= optind; + argv += optind; + + if (argc > 0) + cipath = argv[0]; + if (argc > 1) + cioffset = strtol(argv[1], 0, 0x10); bufsize = getpagesize(); -- cgit v1.2.3 From 620a6473df36f8dc6f70bc85ff3465b2e21d1254 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 20 May 2020 12:11:03 +1000 Subject: selftests/powerpc: Add prefixed loads/stores to alignment_handler test Extend the alignment handler selftest to exercise prefixed load store instructions. Add tests for prefixed VSX, floating point and integer instructions. Skip prefix tests if ISA version does not support prefixed instructions. Signed-off-by: Jordan Niethe Tested-by: Alistair Popple [mpe: Fixup PPC_FEATURE2_ARCH_3_1 naming as noted by Alistair] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200520021103.19798-2-jniethe5@gmail.com --- .../powerpc/alignment/alignment_handler.c | 93 +++++++++++++++++++++- .../selftests/powerpc/include/instructions.h | 77 ++++++++++++++++++ tools/testing/selftests/powerpc/include/utils.h | 5 ++ 3 files changed, 172 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c index eb6aba323f8b..55ef15184057 100644 --- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c +++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c @@ -58,6 +58,7 @@ #include #include "utils.h" +#include "instructions.h" int bufsize; int debug; @@ -96,6 +97,17 @@ void sighandler(int sig, siginfo_t *info, void *ctx) } \ rc |= do_test(#name, test_##name) +#define TESTP(name, ld_op, st_op, ld_reg, st_reg) \ + void test_##name(char *s, char *d) \ + { \ + asm volatile( \ + ld_op(ld_reg, %0, 0, 0) \ + st_op(st_reg, %1, 0, 0) \ + :: "r"(s), "r"(d), "r"(0) \ + : "memory", "vs0", "vs32", "r31"); \ + } \ + rc |= do_test(#name, test_##name) + #define LOAD_VSX_XFORM_TEST(op) TEST(op, op, stxvd2x, XFORM, 32, 32) #define STORE_VSX_XFORM_TEST(op) TEST(op, lxvd2x, op, XFORM, 32, 32) #define LOAD_VSX_DFORM_TEST(op) TEST(op, op, stxv, DFORM, 32, 32) @@ -115,6 +127,17 @@ void sighandler(int sig, siginfo_t *info, void *ctx) #define LOAD_FLOAT_XFORM_TEST(op) TEST(op, op, stfdx, XFORM, 0, 0) #define STORE_FLOAT_XFORM_TEST(op) TEST(op, lfdx, op, XFORM, 0, 0) +#define LOAD_MLS_PREFIX_TEST(op) TESTP(op, op, PSTD, 31, 31) +#define STORE_MLS_PREFIX_TEST(op) TESTP(op, PLD, op, 31, 31) + +#define LOAD_8LS_PREFIX_TEST(op) TESTP(op, op, PSTD, 31, 31) 
+#define STORE_8LS_PREFIX_TEST(op) TESTP(op, PLD, op, 31, 31) + +#define LOAD_FLOAT_MLS_PREFIX_TEST(op) TESTP(op, op, PSTFD, 0, 0) +#define STORE_FLOAT_MLS_PREFIX_TEST(op) TESTP(op, PLFD, op, 0, 0) + +#define LOAD_VSX_8LS_PREFIX_TEST(op, tail) TESTP(op, op, PSTXV ## tail, 0, 32) +#define STORE_VSX_8LS_PREFIX_TEST(op, tail) TESTP(op, PLXV ## tail, op, 32, 0) /* FIXME: Unimplemented tests: */ // STORE_DFORM_TEST(stq) /* FIXME: need two registers for quad */ @@ -361,6 +384,25 @@ int test_alignment_handler_vsx_300(void) return rc; } +int test_alignment_handler_vsx_prefix(void) +{ + int rc = 0; + + SKIP_IF(!can_open_cifile()); + SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1)); + + printf("VSX: PREFIX\n"); + LOAD_VSX_8LS_PREFIX_TEST(PLXSD, 0); + LOAD_VSX_8LS_PREFIX_TEST(PLXSSP, 0); + LOAD_VSX_8LS_PREFIX_TEST(PLXV0, 0); + LOAD_VSX_8LS_PREFIX_TEST(PLXV1, 1); + STORE_VSX_8LS_PREFIX_TEST(PSTXSD, 0); + STORE_VSX_8LS_PREFIX_TEST(PSTXSSP, 0); + STORE_VSX_8LS_PREFIX_TEST(PSTXV0, 0); + STORE_VSX_8LS_PREFIX_TEST(PSTXV1, 1); + return rc; +} + int test_alignment_handler_integer(void) { int rc = 0; @@ -432,6 +474,27 @@ int test_alignment_handler_integer_206(void) return rc; } +int test_alignment_handler_integer_prefix(void) +{ + int rc = 0; + + SKIP_IF(!can_open_cifile()); + SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1)); + + printf("Integer: PREFIX\n"); + LOAD_MLS_PREFIX_TEST(PLBZ); + LOAD_MLS_PREFIX_TEST(PLHZ); + LOAD_MLS_PREFIX_TEST(PLHA); + LOAD_MLS_PREFIX_TEST(PLWZ); + LOAD_8LS_PREFIX_TEST(PLWA); + LOAD_8LS_PREFIX_TEST(PLD); + STORE_MLS_PREFIX_TEST(PSTB); + STORE_MLS_PREFIX_TEST(PSTH); + STORE_MLS_PREFIX_TEST(PSTW); + STORE_8LS_PREFIX_TEST(PSTD); + return rc; +} + int test_alignment_handler_vmx(void) { int rc = 0; @@ -520,14 +583,32 @@ int test_alignment_handler_fp_206(void) return rc; } + +int test_alignment_handler_fp_prefix(void) +{ + int rc = 0; + + SKIP_IF(!can_open_cifile()); + SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1)); + + printf("Floating point: PREFIX\n"); + LOAD_FLOAT_DFORM_TEST(lfs); + LOAD_FLOAT_MLS_PREFIX_TEST(PLFS); + LOAD_FLOAT_MLS_PREFIX_TEST(PLFD); + STORE_FLOAT_MLS_PREFIX_TEST(PSTFS); + STORE_FLOAT_MLS_PREFIX_TEST(PSTFD); + return rc; +} + void usage(char *prog) { printf("Usage: %s [options] [path [offset]]\n", prog); printf(" -d Enable debug error output\n"); printf("\n"); - printf("This test requires a POWER8 or POWER9 CPU and either a "); - printf("usable framebuffer at /dev/fb0 or the path to usable "); - printf("cache-inhibited memory and optional offset to be provided\n"); + printf("This test requires a POWER8, POWER9 or POWER10 CPU "); + printf("and either a usable framebuffer at /dev/fb0 or "); + printf("the path to usable cache inhibited memory and optional "); + printf("offset to be provided\n"); } int main(int argc, char *argv[]) @@ -573,10 +654,14 @@ int main(int argc, char *argv[]) "test_alignment_handler_vsx_207"); rc |= test_harness(test_alignment_handler_vsx_300, "test_alignment_handler_vsx_300"); + rc |= test_harness(test_alignment_handler_vsx_prefix, + "test_alignment_handler_vsx_prefix"); rc |= test_harness(test_alignment_handler_integer, "test_alignment_handler_integer"); rc |= test_harness(test_alignment_handler_integer_206, "test_alignment_handler_integer_206"); + rc |= test_harness(test_alignment_handler_integer_prefix, + "test_alignment_handler_integer_prefix"); rc |= test_harness(test_alignment_handler_vmx, "test_alignment_handler_vmx"); rc |= test_harness(test_alignment_handler_fp, @@ -585,5 +670,7 @@ int main(int argc, char *argv[]) 
"test_alignment_handler_fp_205"); rc |= test_harness(test_alignment_handler_fp_206, "test_alignment_handler_fp_206"); + rc |= test_harness(test_alignment_handler_fp_prefix, + "test_alignment_handler_fp_prefix"); return rc; } diff --git a/tools/testing/selftests/powerpc/include/instructions.h b/tools/testing/selftests/powerpc/include/instructions.h index f36061eb6f0f..4efa6314bd96 100644 --- a/tools/testing/selftests/powerpc/include/instructions.h +++ b/tools/testing/selftests/powerpc/include/instructions.h @@ -66,4 +66,81 @@ static inline int paste_last(void *i) #define PPC_INST_PASTE __PASTE(0, 0, 0, 0) #define PPC_INST_PASTE_LAST __PASTE(0, 0, 1, 1) +/* This defines the prefixed load/store instructions */ +#ifdef __ASSEMBLY__ +# define stringify_in_c(...) __VA_ARGS__ +#else +# define __stringify_in_c(...) #__VA_ARGS__ +# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " " +#endif + +#define __PPC_RA(a) (((a) & 0x1f) << 16) +#define __PPC_RS(s) (((s) & 0x1f) << 21) +#define __PPC_RT(t) __PPC_RS(t) +#define __PPC_PREFIX_R(r) (((r) & 0x1) << 20) + +#define PPC_PREFIX_MLS 0x06000000 +#define PPC_PREFIX_8LS 0x04000000 + +#define PPC_INST_LBZ 0x88000000 +#define PPC_INST_LHZ 0xa0000000 +#define PPC_INST_LHA 0xa8000000 +#define PPC_INST_LWZ 0x80000000 +#define PPC_INST_STB 0x98000000 +#define PPC_INST_STH 0xb0000000 +#define PPC_INST_STW 0x90000000 +#define PPC_INST_STD 0xf8000000 +#define PPC_INST_LFS 0xc0000000 +#define PPC_INST_LFD 0xc8000000 +#define PPC_INST_STFS 0xd0000000 +#define PPC_INST_STFD 0xd8000000 + +#define PREFIX_MLS(instr, t, a, r, d) stringify_in_c(.balign 64, , 4;) \ + stringify_in_c(.long PPC_PREFIX_MLS | \ + __PPC_PREFIX_R(r) | \ + (((d) >> 16) & 0x3ffff);) \ + stringify_in_c(.long (instr) | \ + __PPC_RT(t) | \ + __PPC_RA(a) | \ + ((d) & 0xffff);\n) + +#define PREFIX_8LS(instr, t, a, r, d) stringify_in_c(.balign 64, , 4;) \ + stringify_in_c(.long PPC_PREFIX_8LS | \ + __PPC_PREFIX_R(r) | \ + (((d) >> 16) & 0x3ffff);) \ + stringify_in_c(.long (instr) | \ + __PPC_RT(t) | \ + __PPC_RA(a) | \ + ((d) & 0xffff);\n) + +/* Prefixed Integer Load/Store instructions */ +#define PLBZ(t, a, r, d) PREFIX_MLS(PPC_INST_LBZ, t, a, r, d) +#define PLHZ(t, a, r, d) PREFIX_MLS(PPC_INST_LHZ, t, a, r, d) +#define PLHA(t, a, r, d) PREFIX_MLS(PPC_INST_LHA, t, a, r, d) +#define PLWZ(t, a, r, d) PREFIX_MLS(PPC_INST_LWZ, t, a, r, d) +#define PLWA(t, a, r, d) PREFIX_8LS(0xa4000000, t, a, r, d) +#define PLD(t, a, r, d) PREFIX_8LS(0xe4000000, t, a, r, d) +#define PLQ(t, a, r, d) PREFIX_8LS(0xe0000000, t, a, r, d) +#define PSTB(s, a, r, d) PREFIX_MLS(PPC_INST_STB, s, a, r, d) +#define PSTH(s, a, r, d) PREFIX_MLS(PPC_INST_STH, s, a, r, d) +#define PSTW(s, a, r, d) PREFIX_MLS(PPC_INST_STW, s, a, r, d) +#define PSTD(s, a, r, d) PREFIX_8LS(0xf4000000, s, a, r, d) +#define PSTQ(s, a, r, d) PREFIX_8LS(0xf0000000, s, a, r, d) + +/* Prefixed Floating-Point Load/Store Instructions */ +#define PLFS(frt, a, r, d) PREFIX_MLS(PPC_INST_LFS, frt, a, r, d) +#define PLFD(frt, a, r, d) PREFIX_MLS(PPC_INST_LFD, frt, a, r, d) +#define PSTFS(frs, a, r, d) PREFIX_MLS(PPC_INST_STFS, frs, a, r, d) +#define PSTFD(frs, a, r, d) PREFIX_MLS(PPC_INST_STFD, frs, a, r, d) + +/* Prefixed VSX Load/Store Instructions */ +#define PLXSD(vrt, a, r, d) PREFIX_8LS(0xa8000000, vrt, a, r, d) +#define PLXSSP(vrt, a, r, d) PREFIX_8LS(0xac000000, vrt, a, r, d) +#define PLXV0(s, a, r, d) PREFIX_8LS(0xc8000000, s, a, r, d) +#define PLXV1(s, a, r, d) PREFIX_8LS(0xcc000000, s, a, r, d) +#define PSTXSD(vrs, a, r, d) PREFIX_8LS(0xb8000000, vrs, 
a, r, d) +#define PSTXSSP(vrs, a, r, d) PREFIX_8LS(0xbc000000, vrs, a, r, d) +#define PSTXV0(s, a, r, d) PREFIX_8LS(0xd8000000, s, a, r, d) +#define PSTXV1(s, a, r, d) PREFIX_8LS(0xdc000000, s, a, r, d) + #endif /* _SELFTESTS_POWERPC_INSTRUCTIONS_H */ diff --git a/tools/testing/selftests/powerpc/include/utils.h b/tools/testing/selftests/powerpc/include/utils.h index e089a0c30d9a..2e39b297054f 100644 --- a/tools/testing/selftests/powerpc/include/utils.h +++ b/tools/testing/selftests/powerpc/include/utils.h @@ -101,6 +101,11 @@ do { \ #define PPC_FEATURE2_ARCH_3_00 0x00800000 #endif +/* POWER10 feature */ +#ifndef PPC_FEATURE2_ARCH_3_1 +#define PPC_FEATURE2_ARCH_3_1 0x00040000 +#endif + #if defined(__powerpc64__) #define UCONTEXT_NIA(UC) (UC)->uc_mcontext.gp_regs[PT_NIP] #define UCONTEXT_MSR(UC) (UC)->uc_mcontext.gp_regs[PT_MSR] -- cgit v1.2.3 From 03fd42d458fb9cb69e712600bd69ff77ff3a45a8 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 15 Jun 2020 07:48:25 +0000 Subject: powerpc/fixmap: Fix FIX_EARLY_DEBUG_BASE when page size is 256k FIX_EARLY_DEBUG_BASE reserves a 128k area for debugging. When page size is 256k, the calculation results in a 0 number of pages, leading to the following failure: CC arch/powerpc/kernel/asm-offsets.s In file included from ./arch/powerpc/include/asm/nohash/32/pgtable.h:77:0, from ./arch/powerpc/include/asm/nohash/pgtable.h:8, from ./arch/powerpc/include/asm/pgtable.h:20, from ./include/linux/pgtable.h:6, from ./arch/powerpc/include/asm/kup.h:42, from ./arch/powerpc/include/asm/uaccess.h:9, from ./include/linux/uaccess.h:11, from ./include/linux/crypto.h:21, from ./include/crypto/hash.h:11, from ./include/linux/uio.h:10, from ./include/linux/socket.h:8, from ./include/linux/compat.h:15, from arch/powerpc/kernel/asm-offsets.c:14: ./arch/powerpc/include/asm/fixmap.h:75:2: error: overflow in enumeration values __end_of_permanent_fixed_addresses, ^ make[2]: *** [arch/powerpc/kernel/asm-offsets.s] Error 1 Ensure the debug area is at least one page. Fixes: b8e8efaa8639 ("powerpc: reserve fixmap entries for early debug") Reported-by: kernel test robot Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/ca8c9f8249f523b1fab873e67b81b11989d46553.1592207216.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/fixmap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h index 29188810ba30..925cf89cbf4b 100644 --- a/arch/powerpc/include/asm/fixmap.h +++ b/arch/powerpc/include/asm/fixmap.h @@ -52,7 +52,7 @@ enum fixed_addresses { FIX_HOLE, /* reserve the top 128K for early debugging purposes */ FIX_EARLY_DEBUG_TOP = FIX_HOLE, - FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+((128*1024)/PAGE_SIZE)-1, + FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1, #ifdef CONFIG_HIGHMEM FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, -- cgit v1.2.3 From 548ad77d10f7ad6e5f84a0026978da2ed1df0dae Mon Sep 17 00:00:00 2001 From: Imre Kaloz Date: Thu, 22 Dec 2016 09:06:08 +0100 Subject: powerpc/4xx: ppc4xx compile flag optimizations This patch splits up the compile flags between ppc40x and ppc44x. 
Signed-off-by: John Crispin Signed-off-by: Imre Kaloz Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1482393968-60623-1-git-send-email-john@phrozen.org --- arch/powerpc/Makefile | 3 ++- arch/powerpc/boot/Makefile | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index f310c32e88a4..3e8da9cf2eb9 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -246,7 +246,8 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables # often slow when they are implemented at all KBUILD_CFLAGS += $(call cc-option,-mno-string) -cpu-as-$(CONFIG_4xx) += -Wa,-m405 +cpu-as-$(CONFIG_40x) += -Wa,-m405 +cpu-as-$(CONFIG_44x) += -Wa,-m440 cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec) cpu-as-$(CONFIG_E200) += -Wa,-me200 cpu-as-$(CONFIG_E500) += -Wa,-me500 diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 63d7456b9518..4d43cb59b4a4 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -70,10 +70,10 @@ BOOTCFLAGS += -I$(objtree)/$(obj) -I$(srctree)/$(obj) DTC_FLAGS ?= -p 1024 $(obj)/4xx.o: BOOTCFLAGS += -mcpu=405 -$(obj)/ebony.o: BOOTCFLAGS += -mcpu=405 +$(obj)/ebony.o: BOOTCFLAGS += -mcpu=440 $(obj)/cuboot-hotfoot.o: BOOTCFLAGS += -mcpu=405 -$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=405 -$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405 +$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=440 +$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=440 $(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405 $(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405 $(obj)/treeboot-currituck.o: BOOTCFLAGS += -mcpu=405 -- cgit v1.2.3 From 86590e524ee834b629afc55d8e5786091fbf84cc Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 22 Jun 2020 12:10:19 +0530 Subject: powerpc/mm/book3s64: Skip 16G page reservation with radix With hash translation, the hypervisor can hint the LPAR about 16GB contiguous range via ibm,expected#pages. The kernel marks the range specified in the device tree as reserved. Avoid doing this when using radix translation. Radix translation only supports 1G gigantic hugepage and kernel can do the 1G gigantic hugepage allocation via early memblock reservation. This can be done because with radix translation pages are not required to be contiguous on the host. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200622064019.16682-1-aneesh.kumar@linux.ibm.com --- arch/powerpc/mm/book3s64/hash_utils.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 084287fb2908..eec6f4e5e481 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -596,7 +596,7 @@ static void __init htab_scan_page_sizes(void) } #ifdef CONFIG_HUGETLB_PAGE - if (!hugetlb_disabled) { + if (!hugetlb_disabled && !early_radix_enabled() ) { /* Reserve 16G huge page memory sections for huge pages */ of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL); } -- cgit v1.2.3 From 105fb38124a490f38e9c1e23bb4c4a0b6ba12fdb Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 18 Jun 2020 12:07:46 +0000 Subject: powerpc/8xx: Modify ptep_get() Move ptep_get() close to pte_update(), in an ifdef section already dedicated to powerpc 8xx. This section contains explanation about the layout of page table entries. Also modify it to return 4 times the pte value instead of padding with zeroes. 
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/9f2df6621fcaf9eba15fadc61c169d0c8e2fb849.1592481938.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/nohash/32/pgtable.h | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index b0afbdd07740..b9e134d0f03a 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -249,6 +249,18 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p return old; } + +#ifdef CONFIG_PPC_16K_PAGES +#define __HAVE_ARCH_PTEP_GET +static inline pte_t ptep_get(pte_t *ptep) +{ + pte_basic_t val = READ_ONCE(ptep->pte); + pte_t pte = {val, val, val, val}; + + return pte; +} +#endif /* CONFIG_PPC_16K_PAGES */ + #else static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p, unsigned long clr, unsigned long set, int huge) @@ -284,16 +296,6 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, return __pte(pte_update(mm, addr, ptep, ~0, 0, 0)); } -#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES) -#define __HAVE_ARCH_PTEP_GET -static inline pte_t ptep_get(pte_t *ptep) -{ - pte_t pte = {READ_ONCE(ptep->pte), 0, 0, 0}; - - return pte; -} -#endif - #define __HAVE_ARCH_PTEP_SET_WRPROTECT static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -- cgit v1.2.3 From 828ca4320d130bbe1d12866152600c49ff6a9f79 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Thu, 4 Jun 2020 18:26:08 +0530 Subject: selftests/powerpc: Fix pkey access right updates The Power ISA mandates that all writes to the Authority Mask Register (AMR) must always be preceded as well as succeeded by a context synchronizing instruction. This makes sure that the tests follow this requirement when attempting to update a pkey's access rights. Signed-off-by: Sandipan Das Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200604125610.649668-2-sandipan@linux.ibm.com --- tools/testing/selftests/powerpc/include/reg.h | 6 ++++++ tools/testing/selftests/powerpc/ptrace/core-pkey.c | 2 +- tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/powerpc/include/reg.h b/tools/testing/selftests/powerpc/include/reg.h index 022c5076b2c5..c0f2742a3a59 100644 --- a/tools/testing/selftests/powerpc/include/reg.h +++ b/tools/testing/selftests/powerpc/include/reg.h @@ -57,6 +57,12 @@ #define SPRN_PPR 896 /* Program Priority Register */ #define SPRN_AMR 13 /* Authority Mask Register - problem state */ +#define set_amr(v) asm volatile("isync;" \ + "mtspr " __stringify(SPRN_AMR) ",%0;" \ + "isync" : \ + : "r" ((unsigned long)(v)) \ + : "memory") + /* TEXASR register bits */ #define TEXASR_FC 0xFE00000000000000 #define TEXASR_FP 0x0100000000000000 diff --git a/tools/testing/selftests/powerpc/ptrace/core-pkey.c b/tools/testing/selftests/powerpc/ptrace/core-pkey.c index d5c64fee032d..bbc05ffc5860 100644 --- a/tools/testing/selftests/powerpc/ptrace/core-pkey.c +++ b/tools/testing/selftests/powerpc/ptrace/core-pkey.c @@ -150,7 +150,7 @@ static int child(struct shared_info *info) printf("%-30s AMR: %016lx pkey1: %d pkey2: %d pkey3: %d\n", user_write, info->amr, pkey1, pkey2, pkey3); - mtspr(SPRN_AMR, info->amr); + set_amr(info->amr); /* * We won't use pkey3. 
This tests whether the kernel restores the UAMOR diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c index bdbbbe8431e0..904c04f8c919 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c @@ -126,7 +126,7 @@ static int child(struct shared_info *info) printf("%-30s AMR: %016lx pkey1: %d pkey2: %d pkey3: %d\n", user_write, info->amr1, pkey1, pkey2, pkey3); - mtspr(SPRN_AMR, info->amr1); + set_amr(info->amr1); /* Wait for parent to read our AMR value and write a new one. */ ret = prod_parent(&info->child_sync); -- cgit v1.2.3 From c405b738daf9d8e8a5aedfeb6be851681e65e54b Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Thu, 4 Jun 2020 18:26:09 +0530 Subject: selftests/powerpc: Move Hash MMU check to utilities This moves a function to test if the MMU is in Hash mode under the generic test utilities. Signed-off-by: Sandipan Das Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200604125610.649668-3-sandipan@linux.ibm.com --- tools/testing/selftests/powerpc/include/utils.h | 1 + tools/testing/selftests/powerpc/mm/Makefile | 2 +- tools/testing/selftests/powerpc/mm/bad_accesses.c | 28 ----------------------- tools/testing/selftests/powerpc/utils.c | 28 +++++++++++++++++++++++ 4 files changed, 30 insertions(+), 29 deletions(-) diff --git a/tools/testing/selftests/powerpc/include/utils.h b/tools/testing/selftests/powerpc/include/utils.h index 2e39b297054f..9dbe607cc5ec 100644 --- a/tools/testing/selftests/powerpc/include/utils.h +++ b/tools/testing/selftests/powerpc/include/utils.h @@ -60,6 +60,7 @@ static inline bool have_hwcap2(unsigned long ftr2) #endif bool is_ppc64le(void); +int using_hash_mmu(bool *using_hash); /* Yes, this is evil */ #define FAIL_IF(x) \ diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile index b9103c4bb414..2389bf791fd6 100644 --- a/tools/testing/selftests/powerpc/mm/Makefile +++ b/tools/testing/selftests/powerpc/mm/Makefile @@ -10,7 +10,7 @@ TEST_GEN_FILES := tempfile top_srcdir = ../../../../.. 
include ../../lib.mk -$(TEST_GEN_PROGS): ../harness.c +$(TEST_GEN_PROGS): ../harness.c ../utils.c $(OUTPUT)/prot_sao: ../utils.c diff --git a/tools/testing/selftests/powerpc/mm/bad_accesses.c b/tools/testing/selftests/powerpc/mm/bad_accesses.c index adc465f499ef..a864ed7e2008 100644 --- a/tools/testing/selftests/powerpc/mm/bad_accesses.c +++ b/tools/testing/selftests/powerpc/mm/bad_accesses.c @@ -64,34 +64,6 @@ int bad_access(char *p, bool write) return 0; } -static int using_hash_mmu(bool *using_hash) -{ - char line[128]; - FILE *f; - int rc; - - f = fopen("/proc/cpuinfo", "r"); - FAIL_IF(!f); - - rc = 0; - while (fgets(line, sizeof(line), f) != NULL) { - if (strcmp(line, "MMU : Hash\n") == 0) { - *using_hash = true; - goto out; - } - - if (strcmp(line, "MMU : Radix\n") == 0) { - *using_hash = false; - goto out; - } - } - - rc = -1; -out: - fclose(f); - return rc; -} - static int test(void) { unsigned long i, j, addr, region_shift, page_shift, page_size; diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c index 5ee0e98c4896..933678f1ed0a 100644 --- a/tools/testing/selftests/powerpc/utils.c +++ b/tools/testing/selftests/powerpc/utils.c @@ -293,3 +293,31 @@ void set_dscr(unsigned long val) asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR)); } + +int using_hash_mmu(bool *using_hash) +{ + char line[128]; + FILE *f; + int rc; + + f = fopen("/proc/cpuinfo", "r"); + FAIL_IF(!f); + + rc = 0; + while (fgets(line, sizeof(line), f) != NULL) { + if (strcmp(line, "MMU : Hash\n") == 0) { + *using_hash = true; + goto out; + } + + if (strcmp(line, "MMU : Radix\n") == 0) { + *using_hash = false; + goto out; + } + } + + rc = -1; +out: + fclose(f); + return rc; +} -- cgit v1.2.3 From 1addb6444791f9e87fce0eb9882ec96a4a76e615 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Thu, 4 Jun 2020 18:26:10 +0530 Subject: selftests/powerpc: Add test for execute-disabled pkeys Apart from read and write access, memory protection keys can also be used for restricting execute permission of pages on powerpc. This adds a test to verify if the feature works as expected. 
Signed-off-by: Sandipan Das Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200604125610.649668-4-sandipan@linux.ibm.com --- tools/testing/selftests/powerpc/mm/.gitignore | 1 + tools/testing/selftests/powerpc/mm/Makefile | 3 +- .../testing/selftests/powerpc/mm/pkey_exec_prot.c | 388 +++++++++++++++++++++ 3 files changed, 391 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/powerpc/mm/pkey_exec_prot.c diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore index 2ca523255b1b..8f841f925baa 100644 --- a/tools/testing/selftests/powerpc/mm/.gitignore +++ b/tools/testing/selftests/powerpc/mm/.gitignore @@ -8,3 +8,4 @@ wild_bctr large_vm_fork_separation bad_accesses tlbie_test +pkey_exec_prot diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile index 2389bf791fd6..f9fa0ba7435c 100644 --- a/tools/testing/selftests/powerpc/mm/Makefile +++ b/tools/testing/selftests/powerpc/mm/Makefile @@ -3,7 +3,7 @@ noarg: $(MAKE) -C ../ TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \ - large_vm_fork_separation bad_accesses + large_vm_fork_separation bad_accesses pkey_exec_prot TEST_GEN_PROGS_EXTENDED := tlbie_test TEST_GEN_FILES := tempfile @@ -17,6 +17,7 @@ $(OUTPUT)/prot_sao: ../utils.c $(OUTPUT)/wild_bctr: CFLAGS += -m64 $(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64 $(OUTPUT)/bad_accesses: CFLAGS += -m64 +$(OUTPUT)/pkey_exec_prot: CFLAGS += -m64 $(OUTPUT)/tempfile: dd if=/dev/zero of=$@ bs=64k count=1 diff --git a/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c b/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c new file mode 100644 index 000000000000..7c7c93425c5e --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c @@ -0,0 +1,388 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * Copyright 2020, Sandipan Das, IBM Corp. + * + * Test if applying execute protection on pages using memory + * protection keys works as expected. + */ + +#define _GNU_SOURCE +#include +#include +#include +#include + +#include +#include + +#include "reg.h" +#include "utils.h" + +/* + * Older versions of libc use the Intel-specific access rights. + * Hence, override the definitions as they might be incorrect. 
+ */ +#undef PKEY_DISABLE_ACCESS +#define PKEY_DISABLE_ACCESS 0x3 + +#undef PKEY_DISABLE_WRITE +#define PKEY_DISABLE_WRITE 0x2 + +#undef PKEY_DISABLE_EXECUTE +#define PKEY_DISABLE_EXECUTE 0x4 + +/* Older versions of libc do not not define this */ +#ifndef SEGV_PKUERR +#define SEGV_PKUERR 4 +#endif + +#define SI_PKEY_OFFSET 0x20 + +#define SYS_pkey_mprotect 386 +#define SYS_pkey_alloc 384 +#define SYS_pkey_free 385 + +#define PKEY_BITS_PER_PKEY 2 +#define NR_PKEYS 32 +#define PKEY_BITS_MASK ((1UL << PKEY_BITS_PER_PKEY) - 1) + +#define PPC_INST_NOP 0x60000000 +#define PPC_INST_TRAP 0x7fe00008 +#define PPC_INST_BLR 0x4e800020 + +#define sigsafe_err(msg) ({ \ + ssize_t nbytes __attribute__((unused)); \ + nbytes = write(STDERR_FILENO, msg, strlen(msg)); }) + +static inline unsigned long pkeyreg_get(void) +{ + return mfspr(SPRN_AMR); +} + +static inline void pkeyreg_set(unsigned long amr) +{ + set_amr(amr); +} + +static void pkey_set_rights(int pkey, unsigned long rights) +{ + unsigned long amr, shift; + + shift = (NR_PKEYS - pkey - 1) * PKEY_BITS_PER_PKEY; + amr = pkeyreg_get(); + amr &= ~(PKEY_BITS_MASK << shift); + amr |= (rights & PKEY_BITS_MASK) << shift; + pkeyreg_set(amr); +} + +static int sys_pkey_mprotect(void *addr, size_t len, int prot, int pkey) +{ + return syscall(SYS_pkey_mprotect, addr, len, prot, pkey); +} + +static int sys_pkey_alloc(unsigned long flags, unsigned long rights) +{ + return syscall(SYS_pkey_alloc, flags, rights); +} + +static int sys_pkey_free(int pkey) +{ + return syscall(SYS_pkey_free, pkey); +} + +static volatile sig_atomic_t fault_pkey, fault_code, fault_type; +static volatile sig_atomic_t remaining_faults; +static volatile unsigned int *fault_addr; +static unsigned long pgsize, numinsns; +static unsigned int *insns; + +static void trap_handler(int signum, siginfo_t *sinfo, void *ctx) +{ + /* Check if this fault originated from the expected address */ + if (sinfo->si_addr != (void *) fault_addr) + sigsafe_err("got a fault for an unexpected address\n"); + + _exit(1); +} + +static void segv_handler(int signum, siginfo_t *sinfo, void *ctx) +{ + int signal_pkey; + + /* + * In older versions of libc, siginfo_t does not have si_pkey as + * a member. + */ +#ifdef si_pkey + signal_pkey = sinfo->si_pkey; +#else + signal_pkey = *((int *)(((char *) sinfo) + SI_PKEY_OFFSET)); +#endif + + fault_code = sinfo->si_code; + + /* Check if this fault originated from the expected address */ + if (sinfo->si_addr != (void *) fault_addr) { + sigsafe_err("got a fault for an unexpected address\n"); + _exit(1); + } + + /* Check if too many faults have occurred for a single test case */ + if (!remaining_faults) { + sigsafe_err("got too many faults for the same address\n"); + _exit(1); + } + + + /* Restore permissions in order to continue */ + switch (fault_code) { + case SEGV_ACCERR: + if (mprotect(insns, pgsize, PROT_READ | PROT_WRITE)) { + sigsafe_err("failed to set access permissions\n"); + _exit(1); + } + break; + case SEGV_PKUERR: + if (signal_pkey != fault_pkey) { + sigsafe_err("got a fault for an unexpected pkey\n"); + _exit(1); + } + + switch (fault_type) { + case PKEY_DISABLE_ACCESS: + pkey_set_rights(fault_pkey, 0); + break; + case PKEY_DISABLE_EXECUTE: + /* + * Reassociate the exec-only pkey with the region + * to be able to continue. Unlike AMR, we cannot + * set IAMR directly from userspace to restore the + * permissions. 
+ */ + if (mprotect(insns, pgsize, PROT_EXEC)) { + sigsafe_err("failed to set execute permissions\n"); + _exit(1); + } + break; + default: + sigsafe_err("got a fault with an unexpected type\n"); + _exit(1); + } + break; + default: + sigsafe_err("got a fault with an unexpected code\n"); + _exit(1); + } + + remaining_faults--; +} + +static int pkeys_unsupported(void) +{ + bool hash_mmu = false; + int pkey; + + /* Protection keys are currently supported on Hash MMU only */ + FAIL_IF(using_hash_mmu(&hash_mmu)); + SKIP_IF(!hash_mmu); + + /* Check if the system call is supported */ + pkey = sys_pkey_alloc(0, 0); + SKIP_IF(pkey < 0); + sys_pkey_free(pkey); + + return 0; +} + +static int test(void) +{ + struct sigaction segv_act, trap_act; + int pkey, ret, i; + + ret = pkeys_unsupported(); + if (ret) + return ret; + + /* Setup SIGSEGV handler */ + segv_act.sa_handler = 0; + segv_act.sa_sigaction = segv_handler; + FAIL_IF(sigprocmask(SIG_SETMASK, 0, &segv_act.sa_mask) != 0); + segv_act.sa_flags = SA_SIGINFO; + segv_act.sa_restorer = 0; + FAIL_IF(sigaction(SIGSEGV, &segv_act, NULL) != 0); + + /* Setup SIGTRAP handler */ + trap_act.sa_handler = 0; + trap_act.sa_sigaction = trap_handler; + FAIL_IF(sigprocmask(SIG_SETMASK, 0, &trap_act.sa_mask) != 0); + trap_act.sa_flags = SA_SIGINFO; + trap_act.sa_restorer = 0; + FAIL_IF(sigaction(SIGTRAP, &trap_act, NULL) != 0); + + /* Setup executable region */ + pgsize = getpagesize(); + numinsns = pgsize / sizeof(unsigned int); + insns = (unsigned int *) mmap(NULL, pgsize, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + FAIL_IF(insns == MAP_FAILED); + + /* Write the instruction words */ + for (i = 1; i < numinsns - 1; i++) + insns[i] = PPC_INST_NOP; + + /* + * Set the first instruction as an unconditional trap. If + * the last write to this address succeeds, this should + * get overwritten by a no-op. + */ + insns[0] = PPC_INST_TRAP; + + /* + * Later, to jump to the executable region, we use a branch + * and link instruction (bctrl) which sets the return address + * automatically in LR. Use that to return back. + */ + insns[numinsns - 1] = PPC_INST_BLR; + + /* Allocate a pkey that restricts execution */ + pkey = sys_pkey_alloc(0, PKEY_DISABLE_EXECUTE); + FAIL_IF(pkey < 0); + + /* + * Pick the first instruction's address from the executable + * region. + */ + fault_addr = insns; + + /* The following two cases will avoid SEGV_PKUERR */ + fault_type = -1; + fault_pkey = -1; + + /* + * Read an instruction word from the address when AMR bits + * are not set i.e. the pkey permits both read and write + * access. + * + * This should not generate a fault as having PROT_EXEC + * implies PROT_READ on GNU systems. The pkey currently + * restricts execution only based on the IAMR bits. The + * AMR bits are cleared. + */ + remaining_faults = 0; + FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); + printf("read from %p, pkey is execute-disabled, access-enabled\n", + (void *) fault_addr); + i = *fault_addr; + FAIL_IF(remaining_faults != 0); + + /* + * Write an instruction word to the address when AMR bits + * are not set i.e. the pkey permits both read and write + * access. + * + * This should generate an access fault as having just + * PROT_EXEC also restricts writes. The pkey currently + * restricts execution only based on the IAMR bits. The + * AMR bits are cleared. 
+ */ + remaining_faults = 1; + FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); + printf("write to %p, pkey is execute-disabled, access-enabled\n", + (void *) fault_addr); + *fault_addr = PPC_INST_TRAP; + FAIL_IF(remaining_faults != 0 || fault_code != SEGV_ACCERR); + + /* The following three cases will generate SEGV_PKUERR */ + fault_type = PKEY_DISABLE_ACCESS; + fault_pkey = pkey; + + /* + * Read an instruction word from the address when AMR bits + * are set i.e. the pkey permits neither read nor write + * access. + * + * This should generate a pkey fault based on AMR bits only + * as having PROT_EXEC implicitly allows reads. + */ + remaining_faults = 1; + FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); + printf("read from %p, pkey is execute-disabled, access-disabled\n", + (void *) fault_addr); + pkey_set_rights(pkey, PKEY_DISABLE_ACCESS); + i = *fault_addr; + FAIL_IF(remaining_faults != 0 || fault_code != SEGV_PKUERR); + + /* + * Write an instruction word to the address when AMR bits + * are set i.e. the pkey permits neither read nor write + * access. + * + * This should generate two faults. First, a pkey fault + * based on AMR bits and then an access fault since + * PROT_EXEC does not allow writes. + */ + remaining_faults = 2; + FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); + printf("write to %p, pkey is execute-disabled, access-disabled\n", + (void *) fault_addr); + pkey_set_rights(pkey, PKEY_DISABLE_ACCESS); + *fault_addr = PPC_INST_NOP; + FAIL_IF(remaining_faults != 0 || fault_code != SEGV_ACCERR); + + /* + * Jump to the executable region when AMR bits are set i.e. + * the pkey permits neither read nor write access. + * + * This should generate a pkey fault based on IAMR bits which + * are set to not permit execution. AMR bits should not affect + * execution. + * + * This also checks if the overwrite of the first instruction + * word from a trap to a no-op succeeded. + */ + fault_addr = insns; + fault_type = PKEY_DISABLE_EXECUTE; + fault_pkey = pkey; + remaining_faults = 1; + FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); + pkey_set_rights(pkey, PKEY_DISABLE_ACCESS); + printf("execute at %p, pkey is execute-disabled, access-disabled\n", + (void *) fault_addr); + asm volatile("mtctr %0; bctrl" : : "r"(insns)); + FAIL_IF(remaining_faults != 0 || fault_code != SEGV_PKUERR); + + /* + * Free the current pkey and allocate a new one that is + * fully permissive. + */ + sys_pkey_free(pkey); + pkey = sys_pkey_alloc(0, 0); + + /* + * Jump to the executable region when AMR bits are not set + * i.e. the pkey permits read and write access. + * + * This should not generate any faults as the IAMR bits are + * also not set and hence will the pkey will not restrict + * execution. 
+ */ + fault_pkey = pkey; + remaining_faults = 0; + FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); + printf("execute at %p, pkey is execute-enabled, access-enabled\n", + (void *) fault_addr); + asm volatile("mtctr %0; bctrl" : : "r"(insns)); + FAIL_IF(remaining_faults != 0); + + /* Cleanup */ + munmap((void *) insns, pgsize); + sys_pkey_free(pkey); + + return 0; +} + +int main(void) +{ + test_harness(test, "pkey_exec_prot"); +} -- cgit v1.2.3 From df4232d96e724d09e54a623362f9f610727f059f Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Tue, 23 Jun 2020 20:59:20 -0700 Subject: powerpc/boot: Use address-of operator on section symbols Clang warns: arch/powerpc/boot/main.c:107:18: warning: array comparison always evaluates to a constant [-Wtautological-compare] if (_initrd_end > _initrd_start) { ^ arch/powerpc/boot/main.c:155:20: warning: array comparison always evaluates to a constant [-Wtautological-compare] if (_esm_blob_end <= _esm_blob_start) ^ 2 warnings generated. These are not true arrays, they are linker defined symbols, which are just addresses. Using the address of operator silences the warning and does not change the resulting assembly with either clang/ld.lld or gcc/ld (tested with diff + objdump -Dr). Reported-by: Joel Stanley Signed-off-by: Nathan Chancellor Tested-by: Geoff Levand Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200624035920.835571-1-natechancellor@gmail.com --- arch/powerpc/boot/main.c | 4 ++-- arch/powerpc/boot/ps3.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c index a9d209135975..cae31a6e8f02 100644 --- a/arch/powerpc/boot/main.c +++ b/arch/powerpc/boot/main.c @@ -104,7 +104,7 @@ static struct addr_range prep_initrd(struct addr_range vmlinux, void *chosen, { /* If we have an image attached to us, it overrides anything * supplied by the loader. */ - if (_initrd_end > _initrd_start) { + if (&_initrd_end > &_initrd_start) { printf("Attached initrd image at 0x%p-0x%p\n\r", _initrd_start, _initrd_end); initrd_addr = (unsigned long)_initrd_start; @@ -152,7 +152,7 @@ static void prep_esm_blob(struct addr_range vmlinux, void *chosen) unsigned long esm_blob_addr, esm_blob_size; /* Do we have an ESM (Enter Secure Mode) blob? */ - if (_esm_blob_end <= _esm_blob_start) + if (&_esm_blob_end <= &_esm_blob_start) return; printf("Attached ESM blob at 0x%p-0x%p\n\r", diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c index c52552a681c5..6e4efbdb6b7c 100644 --- a/arch/powerpc/boot/ps3.c +++ b/arch/powerpc/boot/ps3.c @@ -127,7 +127,7 @@ void platform_init(void) ps3_repository_read_rm_size(&rm_size); dt_fixup_memory(0, rm_size); - if (_initrd_end > _initrd_start) { + if (&_initrd_end > &_initrd_start) { setprop_val(chosen, "linux,initrd-start", (u32)(_initrd_start)); setprop_val(chosen, "linux,initrd-end", (u32)(_initrd_end)); } -- cgit v1.2.3 From 86bc917d2ac117ec922dbf8ed92ca989bf333281 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 23 Jun 2020 23:03:20 +1000 Subject: powerpc/boot/dts: Fix dtc "pciex" warnings With CONFIG_OF_ALL_DTBS=y, as set by eg. allmodconfig, we see lots of warnings about our dts files, such as: arch/powerpc/boot/dts/glacier.dts:492.26-532.5: Warning (pci_bridge): /plb/pciex@d00000000: node name is not "pci" or "pcie" The node name should not particularly matter, it's just a name, and AFAICS there's no kernel code that cares whether nodes are *named* "pciex" or "pcie". 
So shutup these warnings by converting to the name dtc wants. As always there's some risk this could break something obscure that does rely on the name, in which case we can revert. Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200623130320.405852-1-mpe@ellerman.id.au --- arch/powerpc/boot/dts/akebono.dts | 8 ++++---- arch/powerpc/boot/dts/bluestone.dts | 2 +- arch/powerpc/boot/dts/canyonlands.dts | 4 ++-- arch/powerpc/boot/dts/currituck.dts | 6 +++--- arch/powerpc/boot/dts/glacier.dts | 4 ++-- arch/powerpc/boot/dts/haleakala.dts | 2 +- arch/powerpc/boot/dts/icon.dts | 4 ++-- arch/powerpc/boot/dts/katmai.dts | 6 +++--- arch/powerpc/boot/dts/kilauea.dts | 4 ++-- arch/powerpc/boot/dts/makalu.dts | 4 ++-- arch/powerpc/boot/dts/redwood.dts | 6 +++--- 11 files changed, 25 insertions(+), 25 deletions(-) diff --git a/arch/powerpc/boot/dts/akebono.dts b/arch/powerpc/boot/dts/akebono.dts index cd9d66041a3f..df18f8dc4642 100644 --- a/arch/powerpc/boot/dts/akebono.dts +++ b/arch/powerpc/boot/dts/akebono.dts @@ -248,7 +248,7 @@ }; }; - PCIE0: pciex@10100000000 { + PCIE0: pcie@10100000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; @@ -288,7 +288,7 @@ 0x0 0x0 0x0 0x4 &MPIC 48 0x2 /* int D */>; }; - PCIE1: pciex@20100000000 { + PCIE1: pcie@20100000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; @@ -328,7 +328,7 @@ 0x0 0x0 0x0 0x4 &MPIC 56 0x2 /* int D */>; }; - PCIE2: pciex@18100000000 { + PCIE2: pcie@18100000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; @@ -368,7 +368,7 @@ 0x0 0x0 0x0 0x4 &MPIC 64 0x2 /* int D */>; }; - PCIE3: pciex@28100000000 { + PCIE3: pcie@28100000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; diff --git a/arch/powerpc/boot/dts/bluestone.dts b/arch/powerpc/boot/dts/bluestone.dts index cc965a1816b6..aa1ae94cd776 100644 --- a/arch/powerpc/boot/dts/bluestone.dts +++ b/arch/powerpc/boot/dts/bluestone.dts @@ -325,7 +325,7 @@ }; }; - PCIE0: pciex@d00000000 { + PCIE0: pcie@d00000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts index 0d6ac92d0f5e..c5fbb08e0a6e 100644 --- a/arch/powerpc/boot/dts/canyonlands.dts +++ b/arch/powerpc/boot/dts/canyonlands.dts @@ -461,7 +461,7 @@ interrupt-map = < 0x0 0x0 0x0 0x0 &UIC1 0x0 0x8 >; }; - PCIE0: pciex@d00000000 { + PCIE0: pcie@d00000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; @@ -503,7 +503,7 @@ 0x0 0x0 0x0 0x4 &UIC3 0xf 0x4 /* swizzled int D */>; }; - PCIE1: pciex@d20000000 { + PCIE1: pcie@d20000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; diff --git a/arch/powerpc/boot/dts/currituck.dts b/arch/powerpc/boot/dts/currituck.dts index b6d87b9c2cef..aea8af810106 100644 --- a/arch/powerpc/boot/dts/currituck.dts +++ b/arch/powerpc/boot/dts/currituck.dts @@ -122,7 +122,7 @@ }; }; - PCIE0: pciex@10100000000 { // 4xGBIF1 + PCIE0: pcie@10100000000 { // 4xGBIF1 device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; @@ -160,7 +160,7 @@ 0x0 0x0 0x0 0x4 &MPIC 49 0x2 /* int D */>; }; - PCIE1: pciex@30100000000 { // 4xGBIF0 + PCIE1: pcie@30100000000 { // 4xGBIF0 device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; @@ -197,7 +197,7 @@ 0x0 0x0 0x0 0x4 &MPIC 41 0x2 /* int D */>; }; - PCIE2: pciex@38100000000 { // 2xGBIF0 + PCIE2: pcie@38100000000 { // 2xGBIF0 device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; diff --git a/arch/powerpc/boot/dts/glacier.dts 
b/arch/powerpc/boot/dts/glacier.dts index a7a802f4ffdd..e84ff1afb58c 100644 --- a/arch/powerpc/boot/dts/glacier.dts +++ b/arch/powerpc/boot/dts/glacier.dts @@ -489,7 +489,7 @@ interrupt-map = < 0x0 0x0 0x0 0x0 &UIC1 0x0 0x8 >; }; - PCIE0: pciex@d00000000 { + PCIE0: pcie@d00000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; @@ -531,7 +531,7 @@ 0x0 0x0 0x0 0x4 &UIC3 0xf 0x4 /* swizzled int D */>; }; - PCIE1: pciex@d20000000 { + PCIE1: pcie@d20000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; diff --git a/arch/powerpc/boot/dts/haleakala.dts b/arch/powerpc/boot/dts/haleakala.dts index cb16dad43c92..f81ce8786d59 100644 --- a/arch/powerpc/boot/dts/haleakala.dts +++ b/arch/powerpc/boot/dts/haleakala.dts @@ -237,7 +237,7 @@ }; }; - PCIE0: pciex@a0000000 { + PCIE0: pcie@a0000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; diff --git a/arch/powerpc/boot/dts/icon.dts b/arch/powerpc/boot/dts/icon.dts index 2e6e3a7b2604..fbaa60b8f87a 100644 --- a/arch/powerpc/boot/dts/icon.dts +++ b/arch/powerpc/boot/dts/icon.dts @@ -315,7 +315,7 @@ interrupt-map = <0x0 0x0 0x0 0x0 &UIC1 19 0x8>; }; - PCIE0: pciex@d00000000 { + PCIE0: pcie@d00000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; @@ -356,7 +356,7 @@ 0x0 0x0 0x0 0x4 &UIC3 0x3 0x4 /* swizzled int D */>; }; - PCIE1: pciex@d20000000 { + PCIE1: pcie@d20000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; diff --git a/arch/powerpc/boot/dts/katmai.dts b/arch/powerpc/boot/dts/katmai.dts index 02629e119b87..a8f353229fb7 100644 --- a/arch/powerpc/boot/dts/katmai.dts +++ b/arch/powerpc/boot/dts/katmai.dts @@ -319,7 +319,7 @@ >; }; - PCIE0: pciex@d00000000 { + PCIE0: pcie@d00000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; @@ -360,7 +360,7 @@ 0x0 0x0 0x0 0x4 &UIC3 0x3 0x4 /* swizzled int D */>; }; - PCIE1: pciex@d20000000 { + PCIE1: pcie@d20000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; @@ -401,7 +401,7 @@ 0x0 0x0 0x0 0x4 &UIC3 0x7 0x4 /* swizzled int D */>; }; - PCIE2: pciex@d40000000 { + PCIE2: pcie@d40000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts index 2a3413221cc1..a709fb47a180 100644 --- a/arch/powerpc/boot/dts/kilauea.dts +++ b/arch/powerpc/boot/dts/kilauea.dts @@ -322,7 +322,7 @@ }; }; - PCIE0: pciex@a0000000 { + PCIE0: pcie@a0000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; @@ -363,7 +363,7 @@ 0x0 0x0 0x0 0x4 &UIC2 0x3 0x4 /* swizzled int D */>; }; - PCIE1: pciex@c0000000 { + PCIE1: pcie@c0000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; diff --git a/arch/powerpc/boot/dts/makalu.dts b/arch/powerpc/boot/dts/makalu.dts index bf8fe1629392..c473cd911bca 100644 --- a/arch/powerpc/boot/dts/makalu.dts +++ b/arch/powerpc/boot/dts/makalu.dts @@ -268,7 +268,7 @@ }; }; - PCIE0: pciex@a0000000 { + PCIE0: pcie@a0000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; @@ -309,7 +309,7 @@ 0x0 0x0 0x0 0x4 &UIC2 0x3 0x4 /* swizzled int D */>; }; - PCIE1: pciex@c0000000 { + PCIE1: pcie@c0000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; diff --git a/arch/powerpc/boot/dts/redwood.dts b/arch/powerpc/boot/dts/redwood.dts index f3e046fb49e2..f38035a1f4a1 100644 --- a/arch/powerpc/boot/dts/redwood.dts +++ b/arch/powerpc/boot/dts/redwood.dts @@ -235,7 +235,7 @@ has-new-stacr-staopc; }; }; - PCIE0: pciex@d00000000 { + PCIE0: 
pcie@d00000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; @@ -276,7 +276,7 @@ 0x0 0x0 0x0 0x4 &UIC3 0x3 0x4 /* swizzled int D */>; }; - PCIE1: pciex@d20000000 { + PCIE1: pcie@d20000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; @@ -317,7 +317,7 @@ 0x0 0x0 0x0 0x4 &UIC3 0x7 0x4 /* swizzled int D */>; }; - PCIE2: pciex@d40000000 { + PCIE2: pcie@d40000000 { device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; -- cgit v1.2.3 From 76f09371bc05d6eb8d5a01823c9eaab768d6e934 Mon Sep 17 00:00:00 2001 From: Bin Meng Date: Fri, 1 May 2020 21:44:54 -0700 Subject: powerpc: Drop CONFIG_MTD_M25P80 in 85xx-hw.config Drop CONFIG_MTD_M25P80 that was removed in commit b35b9a10362d ("mtd: spi-nor: Move m25p80 code in spi-nor.c") Signed-off-by: Bin Meng Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1588394694-517-1-git-send-email-bmeng.cn@gmail.com --- arch/powerpc/configs/85xx-hw.config | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/powerpc/configs/85xx-hw.config b/arch/powerpc/configs/85xx-hw.config index b507df6ac69f..524db76f47b7 100644 --- a/arch/powerpc/configs/85xx-hw.config +++ b/arch/powerpc/configs/85xx-hw.config @@ -67,7 +67,6 @@ CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_CFI_INTELEXT=y CONFIG_MTD_CFI=y CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_M25P80=y CONFIG_MTD_NAND_FSL_ELBC=y CONFIG_MTD_NAND_FSL_IFC=y CONFIG_MTD_RAW_NAND=y -- cgit v1.2.3 From 3337bf41e0dd70b4064cdf60acdfcdc2d050066c Mon Sep 17 00:00:00 2001 From: "Desnes A. Nunes do Rosario" Date: Fri, 26 Jun 2020 13:47:37 -0300 Subject: selftests/powerpc: Purge extra count_pmc() calls of ebb selftests An extra count on ebb_state.stats.pmc_count[PMC_INDEX(pmc)] is being per- formed when count_pmc() is used to reset PMCs on a few selftests. This extra pmc_count can occasionally invalidate results, such as the ones from cycles_test shown hereafter. The ebb_check_count() failed with an above the upper limit error due to the extra value on ebb_state.stats.pmc_count. Furthermore, this extra count is also indicated by extra PMC1 trace_log on the output of the cycle test (as well as on pmc56_overflow_test): ========== ... [21]: counter = 8 [22]: register SPRN_MMCR0 = 0x0000000080000080 [23]: register SPRN_PMC1 = 0x0000000080000004 [24]: counter = 9 [25]: register SPRN_MMCR0 = 0x0000000080000080 [26]: register SPRN_PMC1 = 0x0000000080000004 [27]: counter = 10 [28]: register SPRN_MMCR0 = 0x0000000080000080 [29]: register SPRN_PMC1 = 0x0000000080000004 >> [30]: register SPRN_PMC1 = 0x000000004000051e PMC1 count (0x280000546) above upper limit 0x2800003e8 (+0x15e) [FAIL] Test FAILED on line 52 failure: cycles ========== Signed-off-by: Desnes A. 
Nunes do Rosario Tested-by: Sachin Sant Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200626164737.21943-1-desnesn@linux.ibm.com --- tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c | 2 -- tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c | 2 -- tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c | 2 -- tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c | 2 -- tools/testing/selftests/powerpc/pmu/ebb/ebb.c | 2 -- .../testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c | 2 -- tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c | 1 - tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c | 7 ------- tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c | 2 -- tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c | 2 -- tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c | 2 -- 11 files changed, 26 deletions(-) diff --git a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c index a2d7b0e3dca9..a26ac122c759 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c @@ -91,8 +91,6 @@ int back_to_back_ebbs(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c index bc893813483e..bb9f587fa76e 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c @@ -42,8 +42,6 @@ int cycles(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c index dcd351d20328..9ae795ce314e 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c @@ -99,8 +99,6 @@ int cycles_with_freeze(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); printf("EBBs while frozen %d\n", ebbs_while_frozen); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c index 94c99c12c0f2..4b45a2e70f62 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c @@ -71,8 +71,6 @@ int cycles_with_mmcr2(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c index dfbc5c3ad52d..21537d6eb6b7 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c @@ -396,8 +396,6 @@ int ebb_child(union pipe read_pipe, union pipe write_pipe) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c index ca2f7d729155..b208bf6ad58d 100644 --- 
a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c @@ -38,8 +38,6 @@ static int victim_child(union pipe read_pipe, union pipe write_pipe) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); FAIL_IF(ebb_state.stats.ebb_count == 0); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c index ac3e6e182614..ba2681a12cc7 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c @@ -75,7 +75,6 @@ static int test_body(void) ebb_freeze_pmcs(); ebb_global_disable(); - count_pmc(4, sample_period); mtspr(SPRN_PMC4, 0xdead); dump_summary_ebb_state(); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c index b8242e9d97d2..791d37ba327b 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c @@ -70,13 +70,6 @@ int multi_counter(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - count_pmc(2, sample_period); - count_pmc(3, sample_period); - count_pmc(4, sample_period); - count_pmc(5, sample_period); - count_pmc(6, sample_period); - dump_ebb_state(); for (i = 0; i < 6; i++) diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c index a05c0e18ded6..9b0f70d59702 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c @@ -61,8 +61,6 @@ static int cycles_child(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_summary_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c index 153ebc92234f..2904c741e04e 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c @@ -82,8 +82,6 @@ static int test_body(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); if (mmcr0_mismatch) diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c index eadad75ed7e6..b29f8ba22d1e 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c @@ -76,8 +76,6 @@ int pmc56_overflow(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(2, sample_period); - dump_ebb_state(); printf("PMC5/6 overflow %d\n", pmc56_overflowed); -- cgit v1.2.3 From 5c699396f5f6cf6d67055af7b82c270d31fd831a Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 30 Jun 2020 10:02:18 +1000 Subject: powerpc/xmon: Reset RCU and soft lockup watchdogs I'm seeing RCU warnings when exiting xmon. xmon resets the NMI watchdog, but does nothing with the RCU stall or soft lockup watchdogs. Add a helper function that handles all three. 
Signed-off-by: Anton Blanchard Acked-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200630100218.62a3c3fb@kryten.localdomain --- arch/powerpc/xmon/xmon.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 7efe4bc3ccf6..d27944e38b04 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -481,6 +481,13 @@ static inline int unrecoverable_excp(struct pt_regs *regs) #endif } +static void xmon_touch_watchdogs(void) +{ + touch_softlockup_watchdog_sync(); + rcu_cpu_stall_reset(); + touch_nmi_watchdog(); +} + static int xmon_core(struct pt_regs *regs, int fromipi) { int cmd = 0; @@ -718,7 +725,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi) else insert_cpu_bpts(); - touch_nmi_watchdog(); + xmon_touch_watchdogs(); local_irq_restore(flags); return cmd != 'X' && cmd != EOF; -- cgit v1.2.3 From acccc984c1f2e49225b40f1d0d20d383ec27d4d0 Mon Sep 17 00:00:00 2001 From: Sam Bobroff Date: Tue, 30 Jun 2020 08:50:44 +1000 Subject: MAINTAINERS: Remove self from powerpc EEH I'm sorry to say I can no longer maintain this position. Signed-off-by: Sam Bobroff Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/aec7d729c28e35c7fa9969ec50229080c771195c.1593471043.git.sbobroff@linux.ibm.com --- MAINTAINERS | 1 - 1 file changed, 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 7b5ffd646c6b..7de395dfbdd5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13187,7 +13187,6 @@ F: tools/pci/ PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC M: Russell Currey -M: Sam Bobroff M: Oliver O'Halloran L: linuxppc-dev@lists.ozlabs.org S: Supported -- cgit v1.2.3 From 87db7579ebd5ded337056eb765542eb2608f16e3 Mon Sep 17 00:00:00 2001 From: Philippe Bergheaud Date: Fri, 19 Jun 2020 16:04:39 +0200 Subject: ocxl: control via sysfs whether the FPGA is reloaded on a link reset Some opencapi FPGA images allow to control if the FPGA should be reloaded on the next adapter reset. If it is supported, the image specifies it through a Vendor Specific DVSEC in the config space of function 0. Signed-off-by: Philippe Bergheaud Signed-off-by: Frederic Barrat Reviewed-by: Andrew Donnellan Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200619140439.153962-1-fbarrat@linux.ibm.com --- Documentation/ABI/testing/sysfs-class-ocxl | 11 ++++ drivers/misc/ocxl/config.c | 81 ++++++++++++++++++++++++++++-- drivers/misc/ocxl/ocxl_internal.h | 6 +++ drivers/misc/ocxl/sysfs.c | 35 +++++++++++++ include/misc/ocxl-config.h | 1 + 5 files changed, 129 insertions(+), 5 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-class-ocxl b/Documentation/ABI/testing/sysfs-class-ocxl index b5b1fa197592..ae1276efa45a 100644 --- a/Documentation/ABI/testing/sysfs-class-ocxl +++ b/Documentation/ABI/testing/sysfs-class-ocxl @@ -33,3 +33,14 @@ Date: January 2018 Contact: linuxppc-dev@lists.ozlabs.org Description: read/write Give access the global mmio area for the AFU + +What: /sys/class/ocxl//reload_on_reset +Date: February 2020 +Contact: linuxppc-dev@lists.ozlabs.org +Description: read/write + Control whether the FPGA is reloaded on a link reset. Enabled + through a vendor-specific logic block on the FPGA. 
+ 0 Do not reload FPGA image from flash + 1 Reload FPGA image from flash + unavailable + The device does not support this capability diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c index c8e19bfb5ef9..42f7a1298775 100644 --- a/drivers/misc/ocxl/config.c +++ b/drivers/misc/ocxl/config.c @@ -71,6 +71,20 @@ static int find_dvsec_afu_ctrl(struct pci_dev *dev, u8 afu_idx) return 0; } +/** + * get_function_0() - Find a related PCI device (function 0) + * @device: PCI device to match + * + * Returns a pointer to the related device, or null if not found + */ +static struct pci_dev *get_function_0(struct pci_dev *dev) +{ + unsigned int devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0); + + return pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus), + dev->bus->number, devfn); +} + static void read_pasid(struct pci_dev *dev, struct ocxl_fn_config *fn) { u16 val; @@ -159,14 +173,15 @@ static int read_dvsec_afu_info(struct pci_dev *dev, struct ocxl_fn_config *fn) static int read_dvsec_vendor(struct pci_dev *dev) { int pos; - u32 cfg, tlx, dlx; + u32 cfg, tlx, dlx, reset_reload; /* - * vendor specific DVSEC is optional + * vendor specific DVSEC, for IBM images only. Some older + * images may not have it * - * It's currently only used on function 0 to specify the - * version of some logic blocks. Some older images may not - * even have it so we ignore any errors + * It's only used on function 0 to specify the version of some + * logic blocks and to give access to special registers to + * enable host-based flashing. */ if (PCI_FUNC(dev->devfn) != 0) return 0; @@ -178,11 +193,67 @@ static int read_dvsec_vendor(struct pci_dev *dev) pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_CFG_VERS, &cfg); pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_TLX_VERS, &tlx); pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_DLX_VERS, &dlx); + pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, + &reset_reload); dev_dbg(&dev->dev, "Vendor specific DVSEC:\n"); dev_dbg(&dev->dev, " CFG version = 0x%x\n", cfg); dev_dbg(&dev->dev, " TLX version = 0x%x\n", tlx); dev_dbg(&dev->dev, " DLX version = 0x%x\n", dlx); + dev_dbg(&dev->dev, " ResetReload = 0x%x\n", reset_reload); + return 0; +} + +static int get_dvsec_vendor0(struct pci_dev *dev, struct pci_dev **dev0, + int *out_pos) +{ + int pos; + + if (PCI_FUNC(dev->devfn) != 0) { + dev = get_function_0(dev); + if (!dev) + return -1; + } + pos = find_dvsec(dev, OCXL_DVSEC_VENDOR_ID); + if (!pos) + return -1; + *dev0 = dev; + *out_pos = pos; + return 0; +} + +int ocxl_config_get_reset_reload(struct pci_dev *dev, int *val) +{ + struct pci_dev *dev0; + u32 reset_reload; + int pos; + + if (get_dvsec_vendor0(dev, &dev0, &pos)) + return -1; + + pci_read_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, + &reset_reload); + *val = !!(reset_reload & BIT(0)); + return 0; +} + +int ocxl_config_set_reset_reload(struct pci_dev *dev, int val) +{ + struct pci_dev *dev0; + u32 reset_reload; + int pos; + + if (get_dvsec_vendor0(dev, &dev0, &pos)) + return -1; + + pci_read_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, + &reset_reload); + if (val) + reset_reload |= BIT(0); + else + reset_reload &= ~BIT(0); + pci_write_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, + reset_reload); return 0; } diff --git a/drivers/misc/ocxl/ocxl_internal.h b/drivers/misc/ocxl/ocxl_internal.h index 345bf843a38e..af9a84aeee6f 100644 --- a/drivers/misc/ocxl/ocxl_internal.h +++ b/drivers/misc/ocxl/ocxl_internal.h @@ -112,6 +112,12 @@ void 
ocxl_actag_afu_free(struct ocxl_fn *fn, u32 start, u32 size); */ int ocxl_config_get_pasid_info(struct pci_dev *dev, int *count); +/* + * Control whether the FPGA is reloaded on a link reset + */ +int ocxl_config_get_reset_reload(struct pci_dev *dev, int *val); +int ocxl_config_set_reset_reload(struct pci_dev *dev, int val); + /* * Check if an AFU index is valid for the given function. * diff --git a/drivers/misc/ocxl/sysfs.c b/drivers/misc/ocxl/sysfs.c index 58f1ba264206..25c78df8055d 100644 --- a/drivers/misc/ocxl/sysfs.c +++ b/drivers/misc/ocxl/sysfs.c @@ -51,11 +51,46 @@ static ssize_t contexts_show(struct device *device, afu->pasid_count, afu->pasid_max); } +static ssize_t reload_on_reset_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct ocxl_afu *afu = to_afu(device); + struct ocxl_fn *fn = afu->fn; + struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent); + int val; + + if (ocxl_config_get_reset_reload(pci_dev, &val)) + return scnprintf(buf, PAGE_SIZE, "unavailable\n"); + + return scnprintf(buf, PAGE_SIZE, "%d\n", val); +} + +static ssize_t reload_on_reset_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ocxl_afu *afu = to_afu(device); + struct ocxl_fn *fn = afu->fn; + struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent); + int rc, val; + + rc = kstrtoint(buf, 0, &val); + if (rc || (val != 0 && val != 1)) + return -EINVAL; + + if (ocxl_config_set_reset_reload(pci_dev, val)) + return -ENODEV; + + return count; +} + static struct device_attribute afu_attrs[] = { __ATTR_RO(global_mmio_size), __ATTR_RO(pp_mmio_size), __ATTR_RO(afu_version), __ATTR_RO(contexts), + __ATTR_RW(reload_on_reset), }; static ssize_t global_mmio_read(struct file *filp, struct kobject *kobj, diff --git a/include/misc/ocxl-config.h b/include/misc/ocxl-config.h index 3526fa996a22..ccfd3b463517 100644 --- a/include/misc/ocxl-config.h +++ b/include/misc/ocxl-config.h @@ -41,5 +41,6 @@ #define OCXL_DVSEC_VENDOR_CFG_VERS 0x0C #define OCXL_DVSEC_VENDOR_TLX_VERS 0x10 #define OCXL_DVSEC_VENDOR_DLX_VERS 0x20 +#define OCXL_DVSEC_VENDOR_RESET_RELOAD 0x38 #endif /* _OCXL_CONFIG_H_ */ -- cgit v1.2.3 From 5658cf085ba3c3f3c24ac0f7210f0473794df506 Mon Sep 17 00:00:00 2001 From: Srikar Dronamraju Date: Mon, 29 Jun 2020 16:07:01 +0530 Subject: powerpc/cacheinfo: Use cpumap_print to print cpumap Tejun Heo had modified shared_cpu_map_show() to use scnprintf instead of cpumap_print during support for *pb[l] format. Refer commit 0c118b7bd09a ("powerpc: use %*pb[l] to print bitmaps including cpumasks and nodemasks"). cpumap_print_to_pagebuf() is a standard function to print cpumap. With commit 9cf79d115f0d ("bitmap: remove explicit newline handling using scnprintf format string"), there is no need to print explicit newline and trailing null character. cpumap_print_to_pagebuf() internally uses scnprintf(). Hence replace scnprintf() with cpumap_print_to_pagebuf(). Note: shared_cpu_map_show() in drivers/base/cacheinfo.c already uses cpumap_print_to_pagebuf(). Before this patch: # cat /sys/devices/system/cpu0/cache/index1/shared_cpu_map 00ff # (Notice the extra blank line). 
After this patch: # cat /sys/devices/system/cpu0/cache/index1/shared_cpu_map 00ff # Signed-off-by: Srikar Dronamraju Acked-by: Tejun Heo Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200629103703.4538-2-srikar@linux.vnet.ibm.com --- arch/powerpc/kernel/cacheinfo.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c index 470336277c67..0d3c45e2fccd 100644 --- a/arch/powerpc/kernel/cacheinfo.c +++ b/arch/powerpc/kernel/cacheinfo.c @@ -652,7 +652,7 @@ static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *att struct cache_index_dir *index; struct cache *cache; const struct cpumask *mask; - int ret, cpu; + int cpu; index = kobj_to_cache_index_dir(k); cache = index->cache; @@ -664,11 +664,7 @@ static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *att mask = &cache->shared_cpu_map; } - ret = scnprintf(buf, PAGE_SIZE - 1, "%*pb\n", - cpumask_pr_args(mask)); - buf[ret++] = '\n'; - buf[ret] = '\0'; - return ret; + return cpumap_print_to_pagebuf(false, buf, mask); } static struct kobj_attribute cache_shared_cpu_map_attr = -- cgit v1.2.3 From 74b7492e417812ea0f5002e210e2ac07a5728d17 Mon Sep 17 00:00:00 2001 From: Srikar Dronamraju Date: Mon, 29 Jun 2020 16:07:02 +0530 Subject: powerpc/cacheinfo: Make cpumap_show code reusable In anticipation of implementing shared_cpu_list, move code under shared_cpu_map_show() to a common function. No functional changes. Signed-off-by: Srikar Dronamraju Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200629103703.4538-3-srikar@linux.vnet.ibm.com --- arch/powerpc/kernel/cacheinfo.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c index 0d3c45e2fccd..5be870f99623 100644 --- a/arch/powerpc/kernel/cacheinfo.c +++ b/arch/powerpc/kernel/cacheinfo.c @@ -647,7 +647,8 @@ static const struct cpumask *get_big_core_shared_cpu_map(int cpu, struct cache * return &cache->shared_cpu_map; } -static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf) +static ssize_t +show_shared_cpumap(struct kobject *k, struct kobj_attribute *attr, char *buf, bool list) { struct cache_index_dir *index; struct cache *cache; @@ -664,7 +665,12 @@ static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *att mask = &cache->shared_cpu_map; } - return cpumap_print_to_pagebuf(false, buf, mask); + return cpumap_print_to_pagebuf(list, buf, mask); +} + +static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf) +{ + return show_shared_cpumap(k, attr, buf, false) } static struct kobj_attribute cache_shared_cpu_map_attr = -- cgit v1.2.3 From a87a77cb947cc9fc89f0dad51aeee66a61cc7fc4 Mon Sep 17 00:00:00 2001 From: Srikar Dronamraju Date: Mon, 29 Jun 2020 16:07:03 +0530 Subject: powerpc/cacheinfo: Add per cpu per index shared_cpu_list Unlike drivers/base/cacheinfo, powerpc cacheinfo code is not exposing shared_cpu_list under /sys/devices/system/cpu/cpu/cache/index Add shared_cpu_list to per cpu per index directory to maintain parity with x86. Some scripts (example: mmtests https://github.com/gormanm/mmtests) seem to be looking for shared_cpu_list instead of shared_cpu_map. 
Before this patch: # ls /sys/devices/system/cpu0/cache/index1 coherency_line_size number_of_sets size ways_of_associativity level shared_cpu_map type # cat /sys/devices/system/cpu0/cache/index1/shared_cpu_map 00ff # After this patch: # ls /sys/devices/system/cpu0/cache/index1 coherency_line_size number_of_sets shared_cpu_map type level shared_cpu_list size ways_of_associativity # cat /sys/devices/system/cpu0/cache/index1/shared_cpu_map 00ff # cat /sys/devices/system/cpu0/cache/index1/shared_cpu_list 0-7 # Signed-off-by: Srikar Dronamraju Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200629103703.4538-4-srikar@linux.vnet.ibm.com --- arch/powerpc/kernel/cacheinfo.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c index 5be870f99623..d8d4552af30a 100644 --- a/arch/powerpc/kernel/cacheinfo.c +++ b/arch/powerpc/kernel/cacheinfo.c @@ -670,12 +670,20 @@ show_shared_cpumap(struct kobject *k, struct kobj_attribute *attr, char *buf, bo static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf) { - return show_shared_cpumap(k, attr, buf, false) + return show_shared_cpumap(k, attr, buf, false); +} + +static ssize_t shared_cpu_list_show(struct kobject *k, struct kobj_attribute *attr, char *buf) +{ + return show_shared_cpumap(k, attr, buf, true); } static struct kobj_attribute cache_shared_cpu_map_attr = __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL); +static struct kobj_attribute cache_shared_cpu_list_attr = + __ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL); + /* Attributes which should always be created -- the kobject/sysfs core * does this automatically via kobj_type->default_attrs. This is the * minimum data required to uniquely identify a cache. @@ -684,6 +692,7 @@ static struct attribute *cache_index_default_attrs[] = { &cache_type_attr.attr, &cache_level_attr.attr, &cache_shared_cpu_map_attr.attr, + &cache_shared_cpu_list_attr.attr, NULL, }; -- cgit v1.2.3 From c339f9be304c21da1c42899a824f84a2cc9ced30 Mon Sep 17 00:00:00 2001 From: Abhishek Goel Date: Mon, 6 Jul 2020 00:32:58 -0500 Subject: cpuidle/powernv : Remove dead code block Commit 1961acad2f88559c2cdd2ef67c58c3627f1f6e54 removes usage of function "validate_dt_prop_sizes". This patch removes this unused function. Signed-off-by: Abhishek Goel Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200706053258.121475-1-huntbag@linux.vnet.ibm.com --- drivers/cpuidle/cpuidle-powernv.c | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index 1b299e801f74..addaa6e6718b 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -244,20 +244,6 @@ static inline void add_powernv_state(int index, const char *name, stop_psscr_table[index].mask = psscr_mask; } -/* - * Returns 0 if prop1_len == prop2_len. 
Else returns -1 - */ -static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len, - const char *prop2, int prop2_len) -{ - if (prop1_len == prop2_len) - return 0; - - pr_warn("cpuidle-powernv: array sizes don't match for %s and %s\n", - prop1, prop2); - return -1; -} - extern u32 pnv_get_supported_cpuidle_states(void); static int powernv_add_idle_states(void) { -- cgit v1.2.3 From 93eacd94e09db2b1bb0343f8115385e5c34abf0a Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Sun, 5 Jul 2020 23:35:56 +1000 Subject: powerpc/powernv: Make pnv_pci_sriov_enable() and friends static The kernel test robot noticed these are non-static which causes Clang to print some warnings. These are called via ppc_md function pointers so there's no need for them to be non-static. Reported-by: kernel test robot Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200705133557.443607-1-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-ioda.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 73a63efcf855..c2d46d28114b 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1490,7 +1490,7 @@ static void pnv_ioda_release_vf_PE(struct pci_dev *pdev) } } -void pnv_pci_sriov_disable(struct pci_dev *pdev) +static void pnv_pci_sriov_disable(struct pci_dev *pdev) { struct pci_bus *bus; struct pci_controller *hose; @@ -1600,7 +1600,7 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) } } -int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) +static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) { struct pci_bus *bus; struct pci_controller *hose; @@ -1715,7 +1715,7 @@ m64_failed: return ret; } -int pnv_pcibios_sriov_disable(struct pci_dev *pdev) +static int pnv_pcibios_sriov_disable(struct pci_dev *pdev) { pnv_pci_sriov_disable(pdev); @@ -1724,7 +1724,7 @@ int pnv_pcibios_sriov_disable(struct pci_dev *pdev) return 0; } -int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs) +static int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs) { /* Allocate PCI data */ add_sriov_vf_pdns(pdev); -- cgit v1.2.3 From e3417faec526cbf97773dca691dcd743f5bfeb64 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Sun, 5 Jul 2020 23:35:57 +1000 Subject: powerpc/powernv: Move pnv_ioda_setup_bus_dma under CONFIG_IOMMU_API pnv_ioda_setup_bus_dma() is only used when a passed through PE is returned to the host. If the kernel is built without IOMMU support this is dead code. Move it under the #ifdef with the rest of the IOMMU API support. 
Reported-by: kernel test robot Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200705133557.443607-2-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-ioda.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index c2d46d28114b..31c3e6d58c41 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1885,19 +1885,6 @@ static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev, return false; } -static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) -{ - struct pci_dev *dev; - - list_for_each_entry(dev, &bus->devices, bus_list) { - set_iommu_table_base(&dev->dev, pe->table_group.tables[0]); - dev->dev.archdata.dma_offset = pe->tce_bypass_base; - - if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) - pnv_ioda_setup_bus_dma(pe, dev->subordinate); - } -} - static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb, bool real_mode) { @@ -2547,6 +2534,19 @@ static long pnv_pci_ioda2_create_table_userspace( return ret; } +static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) +{ + struct pci_dev *dev; + + list_for_each_entry(dev, &bus->devices, bus_list) { + set_iommu_table_base(&dev->dev, pe->table_group.tables[0]); + dev->dev.archdata.dma_offset = pe->tce_bypass_base; + + if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) + pnv_ioda_setup_bus_dma(pe, dev->subordinate); + } +} + static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group) { struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, -- cgit v1.2.3 From b648a5132ca3237a0f1ce5d871fff342b0efcf8a Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 6 Jul 2020 15:22:46 +0200 Subject: powerpc/spufs: add CONFIG_COREDUMP dependency The kernel test robot pointed out a slightly different error message after recent commit 5456ffdee666 ("powerpc/spufs: simplify spufs core dumping") to spufs for a configuration that never worked: powerpc64-linux-ld: arch/powerpc/platforms/cell/spufs/file.o: in function `.spufs_proxydma_info_dump': >> file.c:(.text+0x4c68): undefined reference to `.dump_emit' powerpc64-linux-ld: arch/powerpc/platforms/cell/spufs/file.o: in function `.spufs_dma_info_dump': file.c:(.text+0x4d70): undefined reference to `.dump_emit' powerpc64-linux-ld: arch/powerpc/platforms/cell/spufs/file.o: in function `.spufs_wbox_info_dump': file.c:(.text+0x4df4): undefined reference to `.dump_emit' Add a Kconfig dependency to prevent this from happening again. 
Reported-by: kernel test robot Signed-off-by: Arnd Bergmann Acked-by: Jeremy Kerr Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200706132302.3885935-1-arnd@arndb.de --- arch/powerpc/platforms/cell/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 0f7c8241912b..f2ff359041ee 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig @@ -44,6 +44,7 @@ config SPU_FS tristate "SPU file system" default m depends on PPC_CELL + depends on COREDUMP select SPU_BASE help The SPU file system is used to access Synergistic Processing -- cgit v1.2.3 From 0138ba5783ae0dcc799ad401a1e8ac8333790df9 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 11 May 2020 20:19:52 +1000 Subject: powerpc/64/signal: Balance return predictor stack in signal trampoline Returning from an interrupt or syscall to a signal handler currently begins execution directly at the handler's entry point, with LR set to the address of the sigreturn trampoline. When the signal handler function returns, it runs the trampoline. It looks like this: # interrupt at user address xyz # kernel stuff... signal is raised rfid # void handler(int sig) addis 2,12,.TOC.-.LCF0@ha addi 2,2,.TOC.-.LCF0@l mflr 0 std 0,16(1) stdu 1,-96(1) # handler stuff ld 0,16(1) mtlr 0 blr # __kernel_sigtramp_rt64 addi r1,r1,__SIGNAL_FRAMESIZE li r0,__NR_rt_sigreturn sc # kernel executes rt_sigreturn rfid # back to user address xyz Note the blr with no matching bl. This can corrupt the return predictor. Solve this by instead resuming execution at the signal trampoline which then calls the signal handler. qtrace-tools link_stack checker confirms the entire user/kernel/vdso cycle is balanced after this patch, whereas it's not upstream. Alan confirms the dwarf unwind info still looks good. gdb still recognises the signal frame and can step into parent frames if it break inside a signal handler. 
Performance is pretty noisy, not a very significant change on a POWER9 here, but branch misses are consistently a lot lower on a microbenchmark: Performance counter stats for './signal': 13,085.72 msec task-clock # 1.000 CPUs utilized 45,024,760,101 cycles # 3.441 GHz 65,102,895,542 instructions # 1.45 insn per cycle 11,271,673,787 branches # 861.372 M/sec 59,468,979 branch-misses # 0.53% of all branches 12,989.09 msec task-clock # 1.000 CPUs utilized 44,692,719,559 cycles # 3.441 GHz 65,109,984,964 instructions # 1.46 insn per cycle 11,282,136,057 branches # 868.585 M/sec 39,786,942 branch-misses # 0.35% of all branches Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200511101952.1463138-1-npiggin@gmail.com --- arch/powerpc/include/asm/ppc-opcode.h | 1 + arch/powerpc/kernel/signal_64.c | 22 ++++++++++++---------- arch/powerpc/kernel/vdso64/sigtramp.S | 13 +++++-------- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 2a39c716c343..bce879fb9afd 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -332,6 +332,7 @@ #define PPC_INST_BLR 0x4e800020 #define PPC_INST_BLRL 0x4e800021 #define PPC_INST_BCTR 0x4e800420 +#define PPC_INST_BCTRL 0x4e800421 #define PPC_INST_MULLD 0x7c0001d2 #define PPC_INST_MULLW 0x7c0001d6 #define PPC_INST_MULHWU 0x7c000016 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 55e5f76554da..97729642f955 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -39,8 +39,8 @@ #define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs)) #define FP_REGS_SIZE sizeof(elf_fpregset_t) -#define TRAMP_TRACEBACK 3 -#define TRAMP_SIZE 6 +#define TRAMP_TRACEBACK 4 +#define TRAMP_SIZE 7 /* * When we have signals to deliver, we set up on the user stack, @@ -600,13 +600,15 @@ static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp) int i; long err = 0; + /* bctrl # call the handler */ + err |= __put_user(PPC_INST_BCTRL, &tramp[0]); /* addi r1, r1, __SIGNAL_FRAMESIZE # Pop the dummy stackframe */ err |= __put_user(PPC_INST_ADDI | __PPC_RT(R1) | __PPC_RA(R1) | - (__SIGNAL_FRAMESIZE & 0xffff), &tramp[0]); + (__SIGNAL_FRAMESIZE & 0xffff), &tramp[1]); /* li r0, __NR_[rt_]sigreturn| */ - err |= __put_user(PPC_INST_ADDI | (syscall & 0xffff), &tramp[1]); + err |= __put_user(PPC_INST_ADDI | (syscall & 0xffff), &tramp[2]); /* sc */ - err |= __put_user(PPC_INST_SC, &tramp[2]); + err |= __put_user(PPC_INST_SC, &tramp[3]); /* Minimal traceback info */ for (i=TRAMP_TRACEBACK; i < TRAMP_SIZE ;i++) @@ -864,12 +866,12 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, /* Set up to return from userspace. */ if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base) { - regs->link = tsk->mm->context.vdso_base + vdso64_rt_sigtramp; + regs->nip = tsk->mm->context.vdso_base + vdso64_rt_sigtramp; } else { err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]); if (err) goto badframe; - regs->link = (unsigned long) &frame->tramp[0]; + regs->nip = (unsigned long) &frame->tramp[0]; } /* Allocate a dummy caller frame for the signal handler. */ @@ -878,8 +880,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, /* Set up "regs" so we "return" to the signal handler. 
*/ if (is_elf2_task()) { - regs->nip = (unsigned long) ksig->ka.sa.sa_handler; - regs->gpr[12] = regs->nip; + regs->ctr = (unsigned long) ksig->ka.sa.sa_handler; + regs->gpr[12] = regs->ctr; } else { /* Handler is *really* a pointer to the function descriptor for * the signal routine. The first entry in the function @@ -889,7 +891,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, func_descr_t __user *funct_desc_ptr = (func_descr_t __user *) ksig->ka.sa.sa_handler; - err |= get_user(regs->nip, &funct_desc_ptr->entry); + err |= get_user(regs->ctr, &funct_desc_ptr->entry); err |= get_user(regs->gpr[2], &funct_desc_ptr->toc); } diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S index a8cc0409d7d2..bbf68cd01088 100644 --- a/arch/powerpc/kernel/vdso64/sigtramp.S +++ b/arch/powerpc/kernel/vdso64/sigtramp.S @@ -6,6 +6,7 @@ * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), IBM Corp. * Copyright (C) 2004 Alan Modra (amodra@au.ibm.com)), IBM Corp. */ +#include /* IFETCH_ALIGN_BYTES */ #include #include #include @@ -14,21 +15,17 @@ .text -/* The nop here is a hack. The dwarf2 unwind routines subtract 1 from - the return address to get an address in the middle of the presumed - call instruction. Since we don't have a call here, we artificially - extend the range covered by the unwind info by padding before the - real start. */ - nop .balign 8 + .balign IFETCH_ALIGN_BYTES V_FUNCTION_BEGIN(__kernel_sigtramp_rt64) -.Lsigrt_start = . - 4 +.Lsigrt_start: + bctrl /* call the handler */ addi r1, r1, __SIGNAL_FRAMESIZE li r0,__NR_rt_sigreturn sc .Lsigrt_end: V_FUNCTION_END(__kernel_sigtramp_rt64) -/* The ".balign 8" above and the following zeros mimic the old stack +/* The .balign 8 above and the following zeros mimic the old stack trampoline layout. The last magic value is the ucontext pointer, chosen in such a way that older libgcc unwind code returns a zero for a sigcontext pointer. */ -- cgit v1.2.3 From 1f9bb31e58118e14ab66239796f2fbe633e1ad32 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 8 Jul 2020 17:49:42 +1000 Subject: selftests/powerpc: Add FPU denormal test Add a testcase that tries to trigger the FPU denormal exception on Power8 or earlier CPUs. 
Prior to commit 4557ac6b344b ("powerpc/64s/exception: Fix 0x1500 interrupt handler crash") this would trigger a crash such as: Oops: Exception in kernel mode, sig: 5 [#1] LE PAGE_SIZE=64K MMU=Hash SMP NR_CPUS=2048 NUMA PowerNV Modules linked in: iptable_mangle xt_MASQUERADE iptable_nat nf_nat xt_conntrack nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 ipt_REJECT nf_reject_ipv4 xt_tcpudp tun bridge stp llc ip6table_filter ip6_tables iptable_filter fuse kvm_hv binfmt_misc squashfs mlx4_ib ib_uverbs dm_multipath scsi_dh_rdac scsi_dh_alua ib_core mlx4_en sr_mod cdrom bnx2x lpfc mlx4_core crc_t10dif scsi_transport_fc sg mdio vmx_crypto crct10dif_vpmsum leds_powernv powernv_rng rng_core led_class powernv_op_panel sunrpc ip_tables x_tables autofs4 CPU: 159 PID: 6854 Comm: fpu_denormal Not tainted 5.8.0-rc2-gcc-8.2.0-00092-g4ec7aaab0828 #192 NIP: c0000000000100ec LR: c00000000001b85c CTR: 0000000000000000 REGS: c000001dd818f770 TRAP: 1500 Not tainted (5.8.0-rc2-gcc-8.2.0-00092-g4ec7aaab0828) MSR: 900000000290b033 CR: 24002884 XER: 20000000 CFAR: c00000000001005c IRQMASK: 1 GPR00: c00000000001c4c8 c000001dd818fa00 c00000000171c200 c000001dd8101570 GPR04: 0000000000000000 c000001dd818fe90 c000001dd8101590 000000000000001d GPR08: 0000000000000010 0000000000002000 c000001dd818fe90 fffffffffc48ac60 GPR12: 0000000000002200 c000001ffff4f480 0000000000000000 0000000000000000 GPR16: 0000000000000000 0000000000000000 0000000000000000 0000000000000000 GPR20: 0000000000000000 00007fffab225b40 0000000000000001 c000000001757168 GPR24: c000001dd8101570 c0000018027b00f0 c000001dd8101570 c000000001496098 GPR28: c00000000174ad05 c000001dd8100000 c000001dd8100000 c000001dd8100000 NIP save_fpu+0xa8/0x2ac LR __giveup_fpu+0x2c/0xd0 Call Trace: 0xc000001dd818fa80 (unreliable) giveup_all+0x118/0x120 __switch_to+0x124/0x6c0 __schedule+0x390/0xaf0 do_task_dead+0x70/0x80 do_exit+0x8fc/0xe10 do_group_exit+0x64/0xd0 sys_exit_group+0x24/0x30 system_call_exception+0x164/0x270 system_call_common+0xf0/0x278 Signed-off-by: Nicholas Piggin [mpe: Split out of fix patch, add oops log] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200708074942.1713396-1-npiggin@gmail.com --- tools/testing/selftests/powerpc/math/.gitignore | 1 + tools/testing/selftests/powerpc/math/Makefile | 2 +- .../testing/selftests/powerpc/math/fpu_denormal.c | 38 ++++++++++++++++++++++ 3 files changed, 40 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/powerpc/math/fpu_denormal.c diff --git a/tools/testing/selftests/powerpc/math/.gitignore b/tools/testing/selftests/powerpc/math/.gitignore index e31ca6f453ed..d0c23b2e4b60 100644 --- a/tools/testing/selftests/powerpc/math/.gitignore +++ b/tools/testing/selftests/powerpc/math/.gitignore @@ -6,3 +6,4 @@ vmx_preempt fpu_signal vmx_signal vsx_preempt +fpu_denormal diff --git a/tools/testing/selftests/powerpc/math/Makefile b/tools/testing/selftests/powerpc/math/Makefile index 11a10d7a2bbd..4e2049d2fd8d 100644 --- a/tools/testing/selftests/powerpc/math/Makefile +++ b/tools/testing/selftests/powerpc/math/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal vsx_preempt +TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal fpu_denormal vmx_syscall vmx_preempt vmx_signal vsx_preempt top_srcdir = ../../../../.. 
include ../../lib.mk diff --git a/tools/testing/selftests/powerpc/math/fpu_denormal.c b/tools/testing/selftests/powerpc/math/fpu_denormal.c new file mode 100644 index 000000000000..5f96682abaa8 --- /dev/null +++ b/tools/testing/selftests/powerpc/math/fpu_denormal.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright IBM Corp. 2020 + * + * This test attempts to cause a FP denormal exception on POWER8 CPUs. Unfortunately + * if the denormal handler is not configured or working properly, this can cause a bad + * crash in kernel mode when the kernel tries to save FP registers when the process + * exits. + */ + +#include +#include + +#include "utils.h" + +static int test_denormal_fpu(void) +{ + unsigned int m32; + unsigned long m64; + volatile float f; + volatile double d; + + /* try to induce lfs ; stfd */ + + m32 = 0x00715fcf; /* random denormal */ + memcpy((float *)&f, &m32, sizeof(f)); + d = f; + memcpy(&m64, (double *)&d, sizeof(d)); + + FAIL_IF((long)(m64 != 0x380c57f3c0000000)); /* renormalised value */ + + return 0; +} + +int main(int argc, char *argv[]) +{ + return test_harness(test_denormal_fpu, "fpu_denormal"); +} -- cgit v1.2.3 From 7d38f089731fe129a49e254028caec6f05420f18 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 2 Jul 2020 14:09:21 +0000 Subject: docs: powerpc: Clarify book3s/32 MMU families Documentation wrongly tells that book3s/32 CPU have hash MMU. 603 and e300 core only have software loaded TLB. 755, 7450 family and e600 core have both hash MMU and software loaded TLB. This can be selected by setting a bit in HID2 (755) or HID0 (others). At the time being this is not supported by the kernel. Make this explicit in the documentation. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/261923c075d1cb49d02493685e8585d4ea2a5197.1593698951.git.christophe.leroy@csgroup.eu --- Documentation/powerpc/cpu_families.rst | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/Documentation/powerpc/cpu_families.rst b/Documentation/powerpc/cpu_families.rst index 1e063c5440c3..9b84e045e713 100644 --- a/Documentation/powerpc/cpu_families.rst +++ b/Documentation/powerpc/cpu_families.rst @@ -9,7 +9,9 @@ and are supported by arch/powerpc. Book3S (aka sPAPR) ------------------ -- Hash MMU +- Hash MMU (except 603 and e300) +- Software loaded TLB (603 and e300) +- Selectable Software loaded TLB in addition to hash MMU (755, 7450, e600) - Mix of 32 & 64 bit:: +--------------+ +----------------+ @@ -24,9 +26,9 @@ Book3S (aka sPAPR) | | | | v v - +--------------+ +----------------+ +-------+ - | 604 | | 750 (G3) | ---> | 750CX | - +--------------+ +----------------+ +-------+ + +--------------+ +-----+ +----------------+ +-------+ + | 604 | | 755 | <--- | 750 (G3) | ---> | 750CX | + +--------------+ +-----+ +----------------+ +-------+ | | | | | | v v v -- cgit v1.2.3 From b506923ee44ae87fc9f4de16b53feb313623e146 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 2 Jul 2020 11:52:02 +0000 Subject: Revert "powerpc/kasan: Fix shadow pages allocation failure" This reverts commit d2a91cef9bbdeb87b7449fdab1a6be6000930210. This commit moved too much work in kasan_init(). The allocation of shadow pages has to be moved for the reason explained in that patch, but the allocation of page tables still need to be done before switching to the final hash table. First revert the incorrect commit, following patch redoes it properly. 
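In rough terms the split that the follow-up patch restores looks like this (condensed sketch; error handling and the per-region mapping loop are omitted):

	/* Early, called from MMU_init() before the final hash table is up:
	 * only the shadow page tables are allocated at this point. */
	void __init kasan_mmu_init(void)
	{
		if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ||
		    IS_ENABLED(CONFIG_KASAN_VMALLOC))
			kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
	}

	/* Late, from kasan_init(), once the entire linear mapping is usable:
	 * allocate and map shadow pages for each memblock region, then remap
	 * the early shadow read-only. */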
Fixes: d2a91cef9bbd ("powerpc/kasan: Fix shadow pages allocation failure") Cc: stable@vger.kernel.org Reported-by: Erhard F. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://bugzilla.kernel.org/show_bug.cgi?id=208181 Link: https://lore.kernel.org/r/3667deb0911affbf999b99f87c31c77d5e870cd2.1593690707.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/kasan.h | 2 ++ arch/powerpc/mm/init_32.c | 2 ++ arch/powerpc/mm/kasan/kasan_init_32.c | 4 +--- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h index be85c7005fb1..d635b96c7ea6 100644 --- a/arch/powerpc/include/asm/kasan.h +++ b/arch/powerpc/include/asm/kasan.h @@ -27,10 +27,12 @@ #ifdef CONFIG_KASAN void kasan_early_init(void); +void kasan_mmu_init(void); void kasan_init(void); void kasan_late_init(void); #else static inline void kasan_init(void) { } +static inline void kasan_mmu_init(void) { } static inline void kasan_late_init(void) { } #endif diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 5a5469eb3174..bf1717f8d5f4 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -171,6 +171,8 @@ void __init MMU_init(void) btext_unmap(); #endif + kasan_mmu_init(); + setup_kup(); /* Shortly after that, the entire linear mapping will be available */ diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c index 0760e1e754e4..4813c6d50889 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/kasan_init_32.c @@ -117,7 +117,7 @@ static void __init kasan_unmap_early_shadow_vmalloc(void) kasan_update_early_region(k_start, k_end, __pte(0)); } -static void __init kasan_mmu_init(void) +void __init kasan_mmu_init(void) { int ret; struct memblock_region *reg; @@ -146,8 +146,6 @@ static void __init kasan_mmu_init(void) void __init kasan_init(void) { - kasan_mmu_init(); - kasan_remap_early_shadow_ro(); clear_page(kasan_early_shadow_page); -- cgit v1.2.3 From 41ea93cf7ba4e0f0cc46ebfdda8b6ff27c67bc91 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 2 Jul 2020 11:52:03 +0000 Subject: powerpc/kasan: Fix shadow pages allocation failure Doing kasan pages allocation in MMU_init is too early, kernel doesn't have access yet to the entire memory space and memblock_alloc() fails when the kernel is a bit big. Do it from kasan_init() instead. Fixes: 2edb16efc899 ("powerpc/32: Add KASAN support") Fixes: d2a91cef9bbd ("powerpc/kasan: Fix shadow pages allocation failure") Cc: stable@vger.kernel.org Reported-by: Erhard F. 
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://bugzilla.kernel.org/show_bug.cgi?id=208181 Link: https://lore.kernel.org/r/63048fcea8a1c02f75429ba3152f80f7853f87fc.1593690707.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/kasan/kasan_init_32.c | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c index 4813c6d50889..019b0c0bbbf3 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/kasan_init_32.c @@ -120,11 +120,24 @@ static void __init kasan_unmap_early_shadow_vmalloc(void) void __init kasan_mmu_init(void) { int ret; + + if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) || + IS_ENABLED(CONFIG_KASAN_VMALLOC)) { + ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END); + + if (ret) + panic("kasan: kasan_init_shadow_page_tables() failed"); + } +} + +void __init kasan_init(void) +{ struct memblock_region *reg; for_each_memblock(memory, reg) { phys_addr_t base = reg->base; phys_addr_t top = min(base + reg->size, total_lowmem); + int ret; if (base >= top) continue; @@ -134,18 +147,6 @@ void __init kasan_mmu_init(void) panic("kasan: kasan_init_region() failed"); } - if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) || - IS_ENABLED(CONFIG_KASAN_VMALLOC)) { - ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END); - - if (ret) - panic("kasan: kasan_init_shadow_page_tables() failed"); - } - -} - -void __init kasan_init(void) -{ kasan_remap_early_shadow_ro(); clear_page(kasan_early_shadow_page); -- cgit v1.2.3 From 667e3c413ecf20371692fd2dc37e06dc14d0b140 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 7 Jul 2020 12:33:35 +0000 Subject: powerpc/signal_32: Remove !FULL_REGS() special handling in PPC64 save_general_regs() Since commit ("1bd79336a426 powerpc: Fix various syscall/signal/swapcontext bugs"), getting save_general_regs() called without FULL_REGS() is very unlikely and generates a warning. The 32-bit version of save_general_regs() doesn't take care of it at all and copies all registers anyway since that commit. Moreover, commit 965dd3ad3076 ("powerpc/64/syscall: Remove non-volatile GPR save optimisation") is another reason why it would never happen. So the same with 64-bit, don't worry about FULL_REGS() and copy all registers all the time. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/173de3b659fa3a5f126a0eb170522cccd909950f.1594125164.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/signal_32.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 1415c16ab628..0fd59fbc6d49 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -109,8 +109,6 @@ static inline int save_general_regs(struct pt_regs *regs, WARN_ON(!FULL_REGS(regs)); for (i = 0; i <= PT_RESULT; i ++) { - if (i == 14 && !FULL_REGS(regs)) - i = 32; if ( i == PT_SOFTE) { if(__put_user((unsigned int)softe, &frame->mc_gregs[i])) return -EFAULT; -- cgit v1.2.3 From 020c4831e01264f8b62af6ca9e669b7c51881a56 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 7 Jul 2020 12:33:36 +0000 Subject: powerpc/signal_32: Simplify loop in PPC64 save_general_regs() save_general_regs() which does special handling when i == PT_SOFTE. Rewrite it to minimise the specific part, especially the __put_user() and associated error handling is the same so make it common. 
Signed-off-by: Christophe Leroy [mpe: Use a regular if rather than ternary operator] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/47a38df46cae5a5a88a558a64d71f75e9c4d9950.1594125164.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/signal_32.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 0fd59fbc6d49..96950f189b5a 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -102,20 +102,18 @@ static inline int save_general_regs(struct pt_regs *regs, struct mcontext __user *frame) { elf_greg_t64 *gregs = (elf_greg_t64 *)regs; - int i; - /* Force usr to alway see softe as 1 (interrupts enabled) */ - elf_greg_t64 softe = 0x1; + int val, i; WARN_ON(!FULL_REGS(regs)); for (i = 0; i <= PT_RESULT; i ++) { - if ( i == PT_SOFTE) { - if(__put_user((unsigned int)softe, &frame->mc_gregs[i])) - return -EFAULT; - else - continue; - } - if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i])) + /* Force usr to alway see softe as 1 (interrupts enabled) */ + if (i == PT_SOFTE) + val = 1; + else + val = gregs[i]; + + if (__put_user(val, &frame->mc_gregs[i])) return -EFAULT; } return 0; -- cgit v1.2.3 From 96032f983ca32ad1d43c73da922dbc7022754c3c Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 7 Jul 2020 18:32:25 +0000 Subject: powerpc/signal64: Don't opencode page prefaulting Instead of doing a __get_user() from the first and last location into a tmp var which won't be used, use fault_in_pages_readable() Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/810bd8840ef990a200f58c9dea9abe767ca02a3a.1594146723.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/signal_64.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 97729642f955..bfc939360bad 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -634,7 +635,6 @@ static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp) SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, struct ucontext __user *, new_ctx, long, ctx_size) { - unsigned char tmp; sigset_t set; unsigned long new_msr = 0; int ctx_has_vsx_region = 0; @@ -669,9 +669,8 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, } if (new_ctx == NULL) return 0; - if (!access_ok(new_ctx, ctx_size) - || __get_user(tmp, (u8 __user *) new_ctx) - || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1)) + if (!access_ok(new_ctx, ctx_size) || + fault_in_pages_readable((u8 __user *)new_ctx, ctx_size)) return -EFAULT; /* -- cgit v1.2.3 From 793d74a8c78e05d6833bfcf582e24e40bd92518f Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 28 Apr 2020 13:16:47 +0000 Subject: powerpc/vdso64: Switch from __get_datapage() to get_datapage inline macro On the same way as already done on PPC32, drop __get_datapage() function and use get_datapage inline macro instead. 
See commit ec0895f08f99 ("powerpc/vdso32: inline __get_datapage()") Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/e13d95312e0b9792556b19b4bb8955cc1ff19fc7.1588079622.git.christophe.leroy@c-s.fr --- arch/powerpc/kernel/vdso64/cacheflush.S | 9 ++++----- arch/powerpc/kernel/vdso64/datapage.S | 28 +++------------------------- arch/powerpc/kernel/vdso64/gettimeofday.S | 9 +++++---- 3 files changed, 12 insertions(+), 34 deletions(-) diff --git a/arch/powerpc/kernel/vdso64/cacheflush.S b/arch/powerpc/kernel/vdso64/cacheflush.S index 526f5ba2593e..cab14324242b 100644 --- a/arch/powerpc/kernel/vdso64/cacheflush.S +++ b/arch/powerpc/kernel/vdso64/cacheflush.S @@ -8,6 +8,7 @@ #include #include #include +#include #include .text @@ -24,14 +25,12 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache) .cfi_startproc mflr r12 .cfi_register lr,r12 - mr r11,r3 - bl V_LOCAL_FUNC(__get_datapage) + get_datapage r10, r0 mtlr r12 - mr r10,r3 lwz r7,CFG_DCACHE_BLOCKSZ(r10) addi r5,r7,-1 - andc r6,r11,r5 /* round low to line bdy */ + andc r6,r3,r5 /* round low to line bdy */ subf r8,r6,r4 /* compute length */ add r8,r8,r5 /* ensure we get enough */ lwz r9,CFG_DCACHE_LOGBLOCKSZ(r10) @@ -48,7 +47,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache) lwz r7,CFG_ICACHE_BLOCKSZ(r10) addi r5,r7,-1 - andc r6,r11,r5 /* round low to line bdy */ + andc r6,r3,r5 /* round low to line bdy */ subf r8,r6,r4 /* compute length */ add r8,r8,r5 lwz r9,CFG_ICACHE_LOGBLOCKSZ(r10) diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S index dc84f5ae3802..067247d3efb9 100644 --- a/arch/powerpc/kernel/vdso64/datapage.S +++ b/arch/powerpc/kernel/vdso64/datapage.S @@ -10,35 +10,13 @@ #include #include #include +#include .text .global __kernel_datapage_offset; __kernel_datapage_offset: .long 0 -V_FUNCTION_BEGIN(__get_datapage) - .cfi_startproc - /* We don't want that exposed or overridable as we want other objects - * to be able to bl directly to here - */ - .protected __get_datapage - .hidden __get_datapage - - mflr r0 - .cfi_register lr,r0 - - bcl 20,31,data_page_branch -data_page_branch: - mflr r3 - mtlr r0 - addi r3, r3, __kernel_datapage_offset-data_page_branch - lwz r0,0(r3) - .cfi_restore lr - add r3,r0,r3 - blr - .cfi_endproc -V_FUNCTION_END(__get_datapage) - /* * void *__kernel_get_syscall_map(unsigned int *syscall_count) ; * @@ -53,7 +31,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map) mflr r12 .cfi_register lr,r12 mr r4,r3 - bl V_LOCAL_FUNC(__get_datapage) + get_datapage r3, r0 mtlr r12 addi r3,r3,CFG_SYSCALL_MAP64 cmpldi cr0,r4,0 @@ -75,7 +53,7 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq) .cfi_startproc mflr r12 .cfi_register lr,r12 - bl V_LOCAL_FUNC(__get_datapage) + get_datapage r3, r0 ld r3,CFG_TB_TICKS_PER_SEC(r3) mtlr r12 crclr cr0*4+so diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S index 1c9a04703250..20f8be40c653 100644 --- a/arch/powerpc/kernel/vdso64/gettimeofday.S +++ b/arch/powerpc/kernel/vdso64/gettimeofday.S @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -26,7 +27,7 @@ V_FUNCTION_BEGIN(__kernel_gettimeofday) mr r11,r3 /* r11 holds tv */ mr r10,r4 /* r10 holds tz */ - bl V_LOCAL_FUNC(__get_datapage) /* get data page */ + get_datapage r3, r0 cmpldi r11,0 /* check if tv is NULL */ beq 2f lis r7,1000000@ha /* load up USEC_PER_SEC */ @@ -71,7 +72,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) mflr r12 /* r12 saves lr */ .cfi_register lr,r12 mr r11,r4 /* r11 saves tp */ - bl 
V_LOCAL_FUNC(__get_datapage) /* get data page */ + get_datapage r3, r0 lis r7,NSEC_PER_SEC@h /* want nanoseconds */ ori r7,r7,NSEC_PER_SEC@l beq cr5,70f @@ -188,7 +189,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres) mflr r12 .cfi_register lr,r12 - bl V_LOCAL_FUNC(__get_datapage) + get_datapage r3, r0 lwz r5, CLOCK_HRTIMER_RES(r3) mtlr r12 li r3,0 @@ -221,7 +222,7 @@ V_FUNCTION_BEGIN(__kernel_time) .cfi_register lr,r12 mr r11,r3 /* r11 holds t */ - bl V_LOCAL_FUNC(__get_datapage) + get_datapage r3, r0 ld r4,STAMP_XTIME_SEC(r3) -- cgit v1.2.3 From 029ab30b4c0a7ec587eece1ec07c3981fdff2bed Mon Sep 17 00:00:00 2001 From: Bharata B Rao Date: Fri, 3 Jul 2020 11:06:06 +0530 Subject: powerpc/mm: Enable radix GTSE only if supported. Make GTSE an MMU feature and enable it by default for radix. However for guest, conditionally enable it if hypervisor supports it via OV5 vector. Let prom_init ask for radix GTSE only if the support exists. Having GTSE as an MMU feature will make it easy to enable radix without GTSE. Currently radix assumes GTSE is enabled by default. Signed-off-by: Bharata B Rao Reviewed-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200703053608.12884-2-bharata@linux.ibm.com --- arch/powerpc/include/asm/mmu.h | 4 ++++ arch/powerpc/kernel/dt_cpu_ftrs.c | 1 + arch/powerpc/kernel/prom_init.c | 13 ++++++++----- arch/powerpc/mm/init_64.c | 5 ++++- 4 files changed, 17 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index f4ac25d4df05..884d51995934 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -28,6 +28,9 @@ * Individual features below. */ +/* Guest Translation Shootdown Enable */ +#define MMU_FTR_GTSE ASM_CONST(0x00001000) + /* * Support for 68 bit VA space. 
We added that from ISA 2.05 */ @@ -173,6 +176,7 @@ enum { #endif #ifdef CONFIG_PPC_RADIX_MMU MMU_FTR_TYPE_RADIX | + MMU_FTR_GTSE | #ifdef CONFIG_PPC_KUAP MMU_FTR_RADIX_KUAP | #endif /* CONFIG_PPC_KUAP */ diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index a0edeb391e3e..ac650c233cd9 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -336,6 +336,7 @@ static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f) #ifdef CONFIG_PPC_RADIX_MMU cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX; cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE; + cur_cpu_spec->mmu_features |= MMU_FTR_GTSE; cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU; return 1; diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 90c604d00b7d..cbc605cfdec0 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -1336,12 +1336,15 @@ static void __init prom_check_platform_support(void) } } - if (supported.radix_mmu && supported.radix_gtse && - IS_ENABLED(CONFIG_PPC_RADIX_MMU)) { - /* Radix preferred - but we require GTSE for now */ - prom_debug("Asking for radix with GTSE\n"); + if (supported.radix_mmu && IS_ENABLED(CONFIG_PPC_RADIX_MMU)) { + /* Radix preferred - Check if GTSE is also supported */ + prom_debug("Asking for radix\n"); ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX); - ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE); + if (supported.radix_gtse) + ibm_architecture_vec.vec5.radix_ext = + OV5_FEAT(OV5_RADIX_GTSE); + else + prom_debug("Radix GTSE isn't supported\n"); } else if (supported.hash_mmu) { /* Default to hash mmu (if we can) */ prom_debug("Asking for hash\n"); diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index bc73abf0bc25..152aa0200cef 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -407,12 +407,15 @@ static void __init early_check_vec5(void) if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] & OV5_FEAT(OV5_RADIX_GTSE))) { pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n"); - } + cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE; + } else + cur_cpu_spec->mmu_features |= MMU_FTR_GTSE; /* Do radix anyway - the hypervisor said we had to */ cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX; } else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) { /* Hypervisor only supports hash - disable radix */ cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; + cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE; } } -- cgit v1.2.3 From b6c84175078ff022b343b7b0737aeb33001ca90c Mon Sep 17 00:00:00 2001 From: Bharata B Rao Date: Fri, 3 Jul 2020 11:06:07 +0530 Subject: powerpc/pseries: H_REGISTER_PROC_TBL should ask for GTSE only if enabled H_REGISTER_PROC_TBL asks for GTSE by default. GTSE flag bit should be set only when GTSE is supported. 
Signed-off-by: Bharata B Rao Reviewed-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200703053608.12884-3-bharata@linux.ibm.com --- arch/powerpc/platforms/pseries/lpar.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index fd26f3d21d7b..f82569a505f1 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -1680,9 +1680,11 @@ static int pseries_lpar_register_process_table(unsigned long base, if (table_size) flags |= PROC_TABLE_NEW; - if (radix_enabled()) - flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE; - else + if (radix_enabled()) { + flags |= PROC_TABLE_RADIX; + if (mmu_has_feature(MMU_FTR_GTSE)) + flags |= PROC_TABLE_GTSE; + } else flags |= PROC_TABLE_HPT_SLB; for (;;) { rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base, -- cgit v1.2.3 From dd3d9aa5589c52efaec12ffeb84f0f5f8208fbc3 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 3 Jul 2020 11:06:08 +0530 Subject: powerpc/mm/book3s64/radix: Off-load TLB invalidations to host when !GTSE When platform doesn't support GTSE, let TLB invalidation requests for radix guests be off-loaded to the host using H_RPT_INVALIDATE hcall. [hcall wrapper, error path handling and renames] Signed-off-by: Nicholas Piggin Signed-off-by: Bharata B Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200703053608.12884-4-bharata@linux.ibm.com --- .../powerpc/include/asm/book3s/64/tlbflush-radix.h | 15 ++++ arch/powerpc/include/asm/hvcall.h | 34 ++++++++- arch/powerpc/include/asm/plpar_wrappers.h | 52 ++++++++++++++ arch/powerpc/mm/book3s64/radix_tlb.c | 82 ++++++++++++++++++++-- 4 files changed, 175 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h index ca8db193ae38..94439e0cefc9 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h @@ -2,10 +2,25 @@ #ifndef _ASM_POWERPC_TLBFLUSH_RADIX_H #define _ASM_POWERPC_TLBFLUSH_RADIX_H +#include + struct vm_area_struct; struct mm_struct; struct mmu_gather; +static inline u64 psize_to_rpti_pgsize(unsigned long psize) +{ + if (psize == MMU_PAGE_4K) + return H_RPTI_PAGE_4K; + if (psize == MMU_PAGE_64K) + return H_RPTI_PAGE_64K; + if (psize == MMU_PAGE_2M) + return H_RPTI_PAGE_2M; + if (psize == MMU_PAGE_1G) + return H_RPTI_PAGE_1G; + return H_RPTI_PAGE_ALL; +} + static inline int mmu_get_ap(int psize) { return mmu_psize_defs[psize].ap; diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index e90c073e437e..43486e773bd6 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -305,7 +305,8 @@ #define H_SCM_UNBIND_ALL 0x3FC #define H_SCM_HEALTH 0x400 #define H_SCM_PERFORMANCE_STATS 0x418 -#define MAX_HCALL_OPCODE H_SCM_PERFORMANCE_STATS +#define H_RPT_INVALIDATE 0x448 +#define MAX_HCALL_OPCODE H_RPT_INVALIDATE /* Scope args for H_SCM_UNBIND_ALL */ #define H_UNBIND_SCOPE_ALL (0x1) @@ -389,6 +390,37 @@ #define PROC_TABLE_RADIX 0x04 #define PROC_TABLE_GTSE 0x01 +/* + * Defines for + * H_RPT_INVALIDATE - Invalidate RPT translation lookaside information. 
+ */ + +/* Type of translation to invalidate (type) */ +#define H_RPTI_TYPE_NESTED 0x0001 /* Invalidate nested guest partition-scope */ +#define H_RPTI_TYPE_TLB 0x0002 /* Invalidate TLB */ +#define H_RPTI_TYPE_PWC 0x0004 /* Invalidate Page Walk Cache */ +/* Invalidate Process Table Entries if H_RPTI_TYPE_NESTED is clear */ +#define H_RPTI_TYPE_PRT 0x0008 +/* Invalidate Partition Table Entries if H_RPTI_TYPE_NESTED is set */ +#define H_RPTI_TYPE_PAT 0x0008 +#define H_RPTI_TYPE_ALL (H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | \ + H_RPTI_TYPE_PRT) +#define H_RPTI_TYPE_NESTED_ALL (H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | \ + H_RPTI_TYPE_PAT) + +/* Invalidation targets (target) */ +#define H_RPTI_TARGET_CMMU 0x01 /* All virtual processors in the partition */ +#define H_RPTI_TARGET_CMMU_LOCAL 0x02 /* Current virtual processor */ +/* All nest/accelerator agents in use by the partition */ +#define H_RPTI_TARGET_NMMU 0x04 + +/* Page size mask (page sizes) */ +#define H_RPTI_PAGE_4K 0x01 +#define H_RPTI_PAGE_64K 0x02 +#define H_RPTI_PAGE_2M 0x04 +#define H_RPTI_PAGE_1G 0x08 +#define H_RPTI_PAGE_ALL (-1UL) + #ifndef __ASSEMBLY__ #include diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 4497c8afb573..4293c5d2ddf4 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -334,6 +334,51 @@ static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p) return rc; } +/* + * Wrapper to H_RPT_INVALIDATE hcall that handles return values appropriately + * + * - Returns H_SUCCESS on success + * - For H_BUSY return value, we retry the hcall. + * - For any other hcall failures, attempt a full flush once before + * resorting to BUG(). + * + * Note: This hcall is expected to fail only very rarely. The correct + * error recovery of killing the process/guest will be eventually + * needed. 
+ */ +static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type, + u64 page_sizes, u64 start, u64 end) +{ + long rc; + unsigned long all; + + while (true) { + rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target, type, + page_sizes, start, end); + if (rc == H_BUSY) { + cpu_relax(); + continue; + } else if (rc == H_SUCCESS) + return rc; + + /* Flush request failed, try with a full flush once */ + if (type & H_RPTI_TYPE_NESTED) + all = H_RPTI_TYPE_NESTED | H_RPTI_TYPE_NESTED_ALL; + else + all = H_RPTI_TYPE_ALL; +retry: + rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target, + all, page_sizes, 0, -1UL); + if (rc == H_BUSY) { + cpu_relax(); + goto retry; + } else if (rc == H_SUCCESS) + return rc; + + BUG(); + } +} + #else /* !CONFIG_PPC_PSERIES */ static inline long plpar_set_ciabr(unsigned long ciabr) @@ -346,6 +391,13 @@ static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex, { return 0; } + +static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type, + u64 page_sizes, u64 start, u64 end) +{ + return 0; +} + #endif /* CONFIG_PPC_PSERIES */ #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c index b5cc9b23cf02..0d233763441f 100644 --- a/arch/powerpc/mm/book3s64/radix_tlb.c +++ b/arch/powerpc/mm/book3s64/radix_tlb.c @@ -16,6 +16,7 @@ #include #include #include +#include #define RIC_FLUSH_TLB 0 #define RIC_FLUSH_PWC 1 @@ -694,7 +695,14 @@ void radix__flush_tlb_mm(struct mm_struct *mm) goto local; } - if (cputlb_use_tlbie()) { + if (!mmu_has_feature(MMU_FTR_GTSE)) { + unsigned long tgt = H_RPTI_TARGET_CMMU; + + if (atomic_read(&mm->context.copros) > 0) + tgt |= H_RPTI_TARGET_NMMU; + pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB, + H_RPTI_PAGE_ALL, 0, -1UL); + } else if (cputlb_use_tlbie()) { if (mm_needs_flush_escalation(mm)) _tlbie_pid(pid, RIC_FLUSH_ALL); else @@ -727,7 +735,16 @@ static void __flush_all_mm(struct mm_struct *mm, bool fullmm) goto local; } } - if (cputlb_use_tlbie()) + if (!mmu_has_feature(MMU_FTR_GTSE)) { + unsigned long tgt = H_RPTI_TARGET_CMMU; + unsigned long type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | + H_RPTI_TYPE_PRT; + + if (atomic_read(&mm->context.copros) > 0) + tgt |= H_RPTI_TARGET_NMMU; + pseries_rpt_invalidate(pid, tgt, type, + H_RPTI_PAGE_ALL, 0, -1UL); + } else if (cputlb_use_tlbie()) _tlbie_pid(pid, RIC_FLUSH_ALL); else _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL); @@ -760,7 +777,19 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, exit_flush_lazy_tlbs(mm); goto local; } - if (cputlb_use_tlbie()) + if (!mmu_has_feature(MMU_FTR_GTSE)) { + unsigned long tgt, pg_sizes, size; + + tgt = H_RPTI_TARGET_CMMU; + pg_sizes = psize_to_rpti_pgsize(psize); + size = 1UL << mmu_psize_to_shift(psize); + + if (atomic_read(&mm->context.copros) > 0) + tgt |= H_RPTI_TARGET_NMMU; + pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB, + pg_sizes, vmaddr, + vmaddr + size); + } else if (cputlb_use_tlbie()) _tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB); else _tlbiel_va_multicast(mm, vmaddr, pid, psize, RIC_FLUSH_TLB); @@ -810,7 +839,14 @@ static inline void _tlbiel_kernel_broadcast(void) */ void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end) { - if (cputlb_use_tlbie()) + if (!mmu_has_feature(MMU_FTR_GTSE)) { + unsigned long tgt = H_RPTI_TARGET_CMMU | H_RPTI_TARGET_NMMU; + unsigned long type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | + H_RPTI_TYPE_PRT; + + pseries_rpt_invalidate(0, tgt, type, H_RPTI_PAGE_ALL, + 
start, end); + } else if (cputlb_use_tlbie()) _tlbie_pid(0, RIC_FLUSH_ALL); else _tlbiel_kernel_broadcast(); @@ -864,7 +900,17 @@ is_local: nr_pages > tlb_local_single_page_flush_ceiling); } - if (full) { + if (!mmu_has_feature(MMU_FTR_GTSE) && !local) { + unsigned long tgt = H_RPTI_TARGET_CMMU; + unsigned long pg_sizes = psize_to_rpti_pgsize(mmu_virtual_psize); + + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) + pg_sizes |= psize_to_rpti_pgsize(MMU_PAGE_2M); + if (atomic_read(&mm->context.copros) > 0) + tgt |= H_RPTI_TARGET_NMMU; + pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB, pg_sizes, + start, end); + } else if (full) { if (local) { _tlbiel_pid(pid, RIC_FLUSH_TLB); } else { @@ -1046,7 +1092,17 @@ is_local: nr_pages > tlb_local_single_page_flush_ceiling); } - if (full) { + if (!mmu_has_feature(MMU_FTR_GTSE) && !local) { + unsigned long tgt = H_RPTI_TARGET_CMMU; + unsigned long type = H_RPTI_TYPE_TLB; + unsigned long pg_sizes = psize_to_rpti_pgsize(psize); + + if (also_pwc) + type |= H_RPTI_TYPE_PWC; + if (atomic_read(&mm->context.copros) > 0) + tgt |= H_RPTI_TARGET_NMMU; + pseries_rpt_invalidate(pid, tgt, type, pg_sizes, start, end); + } else if (full) { if (local) { _tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB); } else { @@ -1111,7 +1167,19 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr) exit_flush_lazy_tlbs(mm); goto local; } - if (cputlb_use_tlbie()) + if (!mmu_has_feature(MMU_FTR_GTSE)) { + unsigned long tgt, type, pg_sizes; + + tgt = H_RPTI_TARGET_CMMU; + type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | + H_RPTI_TYPE_PRT; + pg_sizes = psize_to_rpti_pgsize(mmu_virtual_psize); + + if (atomic_read(&mm->context.copros) > 0) + tgt |= H_RPTI_TARGET_NMMU; + pseries_rpt_invalidate(pid, tgt, type, pg_sizes, + addr, end); + } else if (cputlb_use_tlbie()) _tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); else _tlbiel_va_range_multicast(mm, -- cgit v1.2.3 From c83040192f3763b243ece26073d61a895b4a230f Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 1 Jul 2020 12:52:29 +0530 Subject: powerpc/pmem: Restrict papr_scm to P8 and above. The PAPR based virtualized persistent memory devices are only supported on POWER9 and above. In the followup patch, the kernel will switch the persistent memory cache flush functions to use a new `dcbf` variant instruction. The new instructions even though added in ISA 3.1 works even on P8 and P9 because these are implemented as a variant of existing `dcbf` and `hwsync` and on P8 and P9 behaves as such. Considering these devices are only supported on P8 and above, update the driver to prevent a P7-compat guest from using persistent memory devices. We don't update of_pmem driver with the same condition, because, on bare-metal, the firmware enables pmem support only on P9 and above. There the kernel depends on OPAL firmware to restrict exposing persistent memory related device tree entries on older hardware. of_pmem.ko is written without any arch dependency and we don't want to add ppc64 specific cpu feature check in of_pmem driver. 
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200701072235.223558-2-aneesh.kumar@linux.ibm.com --- arch/powerpc/platforms/pseries/pmem.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/powerpc/platforms/pseries/pmem.c b/arch/powerpc/platforms/pseries/pmem.c index f860a897a9e0..2347e1038f58 100644 --- a/arch/powerpc/platforms/pseries/pmem.c +++ b/arch/powerpc/platforms/pseries/pmem.c @@ -147,6 +147,12 @@ const struct of_device_id drc_pmem_match[] = { static int pseries_pmem_init(void) { + /* + * Only supported on POWER8 and above. + */ + if (!cpu_has_feature(CPU_FTR_ARCH_207S)) + return 0; + pmem_node = of_find_node_by_type(NULL, "ibm,persistent-memory"); if (!pmem_node) return 0; -- cgit v1.2.3 From 32db09d992ddc7d145595cff49cccfe14e018266 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 1 Jul 2020 12:52:30 +0530 Subject: powerpc/pmem: Add new instructions for persistent storage and sync POWER10 introduces two new variants of dcbf instructions (dcbstps and dcbfps) that can be used to write modified locations back to persistent storage. Additionally, POWER10 also introduce phwsync and plwsync which can be used to establish order of these writes to persistent storage. This patch exposes these instructions to the rest of the kernel. The existing dcbf and hwsync instructions in P8 and P9 are adequate to enable appropriate synchronization with OpenCAPI-hosted persistent storage. Hence the new instructions are added as a variant of the old ones that old hardware won't differentiate. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200701072235.223558-3-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/ppc-opcode.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index bce879fb9afd..777d5056a71c 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -219,6 +219,8 @@ #define PPC_INST_STWCX 0x7c00012d #define PPC_INST_LWSYNC 0x7c2004ac #define PPC_INST_SYNC 0x7c0004ac +#define PPC_INST_PHWSYNC 0x7c8004ac +#define PPC_INST_PLWSYNC 0x7ca004ac #define PPC_INST_SYNC_MASK 0xfc0007fe #define PPC_INST_ISYNC 0x4c00012c #define PPC_INST_LXVD2X 0x7c000698 @@ -284,6 +286,8 @@ #define PPC_INST_TABORT 0x7c00071d #define PPC_INST_TSR 0x7c0005dd +#define PPC_INST_DCBF 0x7c0000ac + #define PPC_INST_NAP 0x4c000364 #define PPC_INST_SLEEP 0x4c0003a4 #define PPC_INST_WINKLE 0x4c0003e4 @@ -533,6 +537,14 @@ #define STBCIX(s,a,b) stringify_in_c(.long PPC_INST_STBCIX | \ __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b)) +#define PPC_DCBFPS(a, b) stringify_in_c(.long PPC_INST_DCBF | \ + ___PPC_RA(a) | ___PPC_RB(b) | (4 << 21)) +#define PPC_DCBSTPS(a, b) stringify_in_c(.long PPC_INST_DCBF | \ + ___PPC_RA(a) | ___PPC_RB(b) | (6 << 21)) + +#define PPC_PHWSYNC stringify_in_c(.long PPC_INST_PHWSYNC) +#define PPC_PLWSYNC stringify_in_c(.long PPC_INST_PLWSYNC) + /* * Define what the VSX XX1 form instructions will look like, then add * the 128 bit load store instructions based on that. -- cgit v1.2.3 From d358042793183a57094dac45a44116e1165ac593 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 1 Jul 2020 12:52:31 +0530 Subject: powerpc/pmem: Add flush routines using new pmem store and sync instruction Start using dcbstps; phwsync; sequence for flushing persistent memory range. 
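Condensed, the clean path added below walks the range one cache line at a time and then orders the whole batch; PPC_DCBSTPS()/PPC_PHWSYNC are the opcode macros from the previous patch, and the real routines are additionally guarded by CPU_FTR_ARCH_207S:

	/* illustrative sketch only */
	static void clean_pmem_range_sketch(unsigned long start, unsigned long stop)
	{
		unsigned long bytes = l1_dcache_bytes();
		void *addr = (void *)(start & ~(bytes - 1));	/* round down to a cache line */

		for (; (unsigned long)addr < stop; addr += bytes)
			asm volatile(PPC_DCBSTPS(%0, %1) : : "i"(0), "r"(addr) : "memory");

		asm volatile(PPC_PHWSYNC ::: "memory");	/* push the cleans to the durability domain */
	}

(A later patch in this series moves the trailing phwsync out of these helpers and into pmem_wmb().)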
The new instructions are implemented as a variant of dcbf and hwsync and on P8 and P9 they will be executed as those instructions. We avoid using them on older hardware. This helps to avoid difficult to debug bugs. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200701072235.223558-4-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/cacheflush.h | 1 + arch/powerpc/lib/pmem.c | 50 ++++++++++++++++++++++++++++++++--- 2 files changed, 47 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h index de600b915a3c..54764c6e922d 100644 --- a/arch/powerpc/include/asm/cacheflush.h +++ b/arch/powerpc/include/asm/cacheflush.h @@ -6,6 +6,7 @@ #include #include +#include #ifdef CONFIG_PPC_BOOK3S_64 /* diff --git a/arch/powerpc/lib/pmem.c b/arch/powerpc/lib/pmem.c index 0666a8d29596..5a61aaeb6930 100644 --- a/arch/powerpc/lib/pmem.c +++ b/arch/powerpc/lib/pmem.c @@ -9,20 +9,62 @@ #include +static inline void __clean_pmem_range(unsigned long start, unsigned long stop) +{ + unsigned long shift = l1_dcache_shift(); + unsigned long bytes = l1_dcache_bytes(); + void *addr = (void *)(start & ~(bytes - 1)); + unsigned long size = stop - (unsigned long)addr + (bytes - 1); + unsigned long i; + + for (i = 0; i < size >> shift; i++, addr += bytes) + asm volatile(PPC_DCBSTPS(%0, %1): :"i"(0), "r"(addr): "memory"); + + + asm volatile(PPC_PHWSYNC ::: "memory"); +} + +static inline void __flush_pmem_range(unsigned long start, unsigned long stop) +{ + unsigned long shift = l1_dcache_shift(); + unsigned long bytes = l1_dcache_bytes(); + void *addr = (void *)(start & ~(bytes - 1)); + unsigned long size = stop - (unsigned long)addr + (bytes - 1); + unsigned long i; + + for (i = 0; i < size >> shift; i++, addr += bytes) + asm volatile(PPC_DCBFPS(%0, %1): :"i"(0), "r"(addr): "memory"); + + + asm volatile(PPC_PHWSYNC ::: "memory"); +} + +static inline void clean_pmem_range(unsigned long start, unsigned long stop) +{ + if (cpu_has_feature(CPU_FTR_ARCH_207S)) + return __clean_pmem_range(start, stop); +} + +static inline void flush_pmem_range(unsigned long start, unsigned long stop) +{ + if (cpu_has_feature(CPU_FTR_ARCH_207S)) + return __flush_pmem_range(start, stop); +} + /* * CONFIG_ARCH_HAS_PMEM_API symbols */ void arch_wb_cache_pmem(void *addr, size_t size) { unsigned long start = (unsigned long) addr; - flush_dcache_range(start, start + size); + clean_pmem_range(start, start + size); } EXPORT_SYMBOL_GPL(arch_wb_cache_pmem); void arch_invalidate_pmem(void *addr, size_t size) { unsigned long start = (unsigned long) addr; - flush_dcache_range(start, start + size); + flush_pmem_range(start, start + size); } EXPORT_SYMBOL_GPL(arch_invalidate_pmem); @@ -35,7 +77,7 @@ long __copy_from_user_flushcache(void *dest, const void __user *src, unsigned long copied, start = (unsigned long) dest; copied = __copy_from_user(dest, src, size); - flush_dcache_range(start, start + size); + clean_pmem_range(start, start + size); return copied; } @@ -45,7 +87,7 @@ void *memcpy_flushcache(void *dest, const void *src, size_t size) unsigned long start = (unsigned long) dest; memcpy(dest, src, size); - flush_dcache_range(start, start + size); + clean_pmem_range(start, start + size); return dest; } -- cgit v1.2.3 From 3e79f082ebfc130360bcee23e4dd74729dcafdf4 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 1 Jul 2020 12:52:32 +0530 Subject: libnvdimm/nvdimm/flush: Allow architecture to override the flush barrier 
Architectures like ppc64 provide persistent memory specific barriers that will ensure that all stores for which the modifications are written to persistent storage by preceding dcbfps and dcbstps instructions have updated persistent storage before any data access or data transfer caused by subsequent instructions is initiated. This is in addition to the ordering done by wmb() Update nvdimm core such that architecture can use barriers other than wmb to ensure all previous writes are architecturally visible for the platform buffer flush. Signed-off-by: Aneesh Kumar K.V Reviewed-by: Dan Williams Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200701072235.223558-5-aneesh.kumar@linux.ibm.com --- Documentation/memory-barriers.txt | 14 ++++++++++++++ drivers/md/dm-writecache.c | 2 +- drivers/nvdimm/region_devs.c | 8 ++++---- include/asm-generic/barrier.h | 10 ++++++++++ 4 files changed, 29 insertions(+), 5 deletions(-) diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt index eaabc3134294..ff07cd3b2f82 100644 --- a/Documentation/memory-barriers.txt +++ b/Documentation/memory-barriers.txt @@ -1935,6 +1935,20 @@ There are some more advanced barrier functions: relaxed I/O accessors and the Documentation/DMA-API.txt file for more information on consistent memory. + (*) pmem_wmb(); + + This is for use with persistent memory to ensure that stores for which + modifications are written to persistent storage reached a platform + durability domain. + + For example, after a non-temporal write to pmem region, we use pmem_wmb() + to ensure that stores have reached a platform durability domain. This ensures + that stores have updated persistent storage before any data access or + data transfer caused by subsequent instructions is initiated. This is + in addition to the ordering done by wmb(). + + For load from persistent memory, existing read memory barriers are sufficient + to ensure read ordering. =============================== IMPLICIT KERNEL MEMORY BARRIERS diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 74f3c506f084..00534fa4a384 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -536,7 +536,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc) static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios) { if (WC_MODE_PMEM(wc)) - wmb(); + pmem_wmb(); else ssd_commit_flushed(wc, wait_for_ios); } diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 4502f9c4708d..c3237c2b03a6 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -1206,13 +1206,13 @@ int generic_nvdimm_flush(struct nd_region *nd_region) idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8)); /* - * The first wmb() is needed to 'sfence' all previous writes - * such that they are architecturally visible for the platform - * buffer flush. Note that we've already arranged for pmem + * The pmem_wmb() is needed to 'sfence' all + * previous writes such that they are architecturally visible for + * the platform buffer flush. Note that we've already arranged for pmem * writes to avoid the cache via memcpy_flushcache(). The final * wmb() ensures ordering for the NVDIMM flush write. 
*/ - wmb(); + pmem_wmb(); for (i = 0; i < nd_region->ndr_mappings; i++) if (ndrd_get_flush_wpq(ndrd, i, 0)) writeq(1, ndrd_get_flush_wpq(ndrd, i, idx)); diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index 2eacaf7d62f6..b589bb216ee5 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -257,5 +257,15 @@ do { \ }) #endif +/* + * pmem_wmb() ensures that all stores for which the modification + * are written to persistent storage by preceding instructions have + * updated persistent storage before any data access or data transfer + * caused by subsequent instructions is initiated. + */ +#ifndef pmem_wmb +#define pmem_wmb() wmb() +#endif + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_GENERIC_BARRIER_H */ -- cgit v1.2.3 From 76e6c73f33d4e1cc4de4f25c0bf66d59e42113c4 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 1 Jul 2020 12:52:33 +0530 Subject: powerpc/pmem: Update ppc64 to use the new barrier instruction. pmem on POWER10 can now use phwsync instead of hwsync to ensure all previous writes are architecturally visible for the platform buffer flush. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200701072235.223558-6-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/barrier.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index 123adcefd40f..35c1b8f3aa68 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -7,6 +7,10 @@ #include +#ifndef __ASSEMBLY__ +#include +#endif + /* * Memory barrier. * The sync instruction guarantees that all memory accesses initiated @@ -97,6 +101,15 @@ do { \ #define barrier_nospec() #endif /* CONFIG_PPC_BARRIER_NOSPEC */ +/* + * pmem_wmb() ensures that all stores for which the modification + * are written to persistent storage by preceding dcbfps/dcbstps + * instructions have updated persistent storage before any data + * access or data transfer caused by subsequent instructions is + * initiated. + */ +#define pmem_wmb() __asm__ __volatile__(PPC_PHWSYNC ::: "memory") + #include #endif /* _ASM_POWERPC_BARRIER_H */ -- cgit v1.2.3 From 436499ab868f1a9e497cfdbf641affe8a122c571 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 1 Jul 2020 12:52:34 +0530 Subject: powerpc/pmem: Avoid the barrier in flush routines nvdimm expect the flush routines to just mark the cache clean. The barrier that mark the store globally visible is done in nvdimm_flush(). 
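After this series the division of labour is roughly as follows (hypothetical helper names; the flush-hint bookkeeping in the real generic_nvdimm_flush() is omitted):

	/* pmem write path: copy and mark the lines clean, no ordering here */
	static void pmem_write_sketch(void *dst, const void *src, size_t n)
	{
		memcpy_flushcache(dst, src, n);	/* memcpy + dcbstps per cache line */
	}

	/* nvdimm core: one barrier covers every clean issued so far,
	 * then the WPQ flush hint register is written */
	static void nvdimm_flush_sketch(void)
	{
		pmem_wmb();	/* phwsync on powerpc, wmb() elsewhere */
		/* writeq(1, flush_hint_address) follows in the real function */
	}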
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200701072235.223558-7-aneesh.kumar@linux.ibm.com --- arch/powerpc/lib/pmem.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/arch/powerpc/lib/pmem.c b/arch/powerpc/lib/pmem.c index 5a61aaeb6930..21210fa676e5 100644 --- a/arch/powerpc/lib/pmem.c +++ b/arch/powerpc/lib/pmem.c @@ -19,9 +19,6 @@ static inline void __clean_pmem_range(unsigned long start, unsigned long stop) for (i = 0; i < size >> shift; i++, addr += bytes) asm volatile(PPC_DCBSTPS(%0, %1): :"i"(0), "r"(addr): "memory"); - - - asm volatile(PPC_PHWSYNC ::: "memory"); } static inline void __flush_pmem_range(unsigned long start, unsigned long stop) @@ -34,9 +31,6 @@ static inline void __flush_pmem_range(unsigned long start, unsigned long stop) for (i = 0; i < size >> shift; i++, addr += bytes) asm volatile(PPC_DCBFPS(%0, %1): :"i"(0), "r"(addr): "memory"); - - - asm volatile(PPC_PHWSYNC ::: "memory"); } static inline void clean_pmem_range(unsigned long start, unsigned long stop) -- cgit v1.2.3 From 8c26ab72663b4affc31e47cdf77d61d0172d1033 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 1 Jul 2020 12:52:35 +0530 Subject: powerpc/pmem: Initialize pmem device on newer hardware With kernel now supporting new pmem flush/sync instructions, we can now enable the kernel to initialize the device. On P10 these devices would appear with a new compatible string. For PAPR device we have compatible "ibm,pmemory-v2" and for OF pmem device we have compatible "pmem-region-v2" Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200701072235.223558-8-aneesh.kumar@linux.ibm.com --- arch/powerpc/platforms/pseries/papr_scm.c | 1 + drivers/nvdimm/of_pmem.c | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index 9c569078a09f..66c19c0fe566 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -876,6 +876,7 @@ static int papr_scm_remove(struct platform_device *pdev) static const struct of_device_id papr_scm_match[] = { { .compatible = "ibm,pmemory" }, + { .compatible = "ibm,pmemory-v2" }, { }, }; diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c index 6826a274a1f1..10dbdcdfb9ce 100644 --- a/drivers/nvdimm/of_pmem.c +++ b/drivers/nvdimm/of_pmem.c @@ -90,6 +90,7 @@ static int of_pmem_region_remove(struct platform_device *pdev) static const struct of_device_id of_pmem_region_match[] = { { .compatible = "pmem-region" }, + { .compatible = "pmem-region-v2" }, { }, }; -- cgit v1.2.3 From 891b4fe8fe3d09f20948b391f24c9fc5b7580a2b Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 24 Jun 2020 09:41:37 +1000 Subject: powerpc/64s: restore_math remove TM test The TM test in restore_math added by commit dc16b553c949e ("powerpc: Always restore FPU/VEC/VSX if hardware transactional memory in use") is no longer necessary after commit a8318c13e79ba ("powerpc/tm: Fix restoring FP/VMX facility incorrectly on interrupts"), which removed the cases where restore_math has to restore if TM is active. 
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200623234139.2262227-1-npiggin@gmail.com --- arch/powerpc/kernel/process.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 4650b9bb217f..02d691bda9b6 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -530,8 +530,7 @@ void notrace restore_math(struct pt_regs *regs) { unsigned long msr; - if (!MSR_TM_ACTIVE(regs->msr) && - !current->thread.load_fp && !loadvec(current->thread)) + if (!current->thread.load_fp && !loadvec(current->thread)) return; msr = regs->msr; -- cgit v1.2.3 From 01eb01877f3386d4bd5de75909abdd0af45a5fa2 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 24 Jun 2020 09:41:38 +1000 Subject: powerpc/64s: Fix restore_math unnecessarily changing MSR Before returning to user, if there are missing FP/VEC/VSX bits from the user MSR then those registers had been saved and must be restored again before use. restore_math will decide whether to restore immediately, or skip the restore and let fp/vec/vsx unavailable faults demand load the registers. Each time restore_math restores one of the FP/VSX or VEC register sets is loaded, an 8-bit counter is incremented (load_fp and load_vec). When these wrap to zero, restore_math no longer restores that register set until after they are next demand faulted. It's quite usual for those counters to have different values, so if one wraps to zero and restore_math no longer restores its registers or user MSR bit but the other is not zero yet does not need to be restored (because the kernel is not frequently using the FPU), then restore_math will be called and it will also not return in the early exit check. This causes msr_check_and_set to test and set the MSR at every kernel exit despite having no work to do. This can cause workloads (e.g., a NULL syscall microbenchmark) to run fast for a time while both counters are non-zero, then slow down when one of the counters reaches zero, then speed up again after the second counter reaches zero. The cost is significant, about 10% slowdown on a NULL syscall benchmark, and the jittery behaviour is very undesirable. Fix this by having restore_math test all conditions first, and only update MSR if we will be loading registers. 
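Reassembled from the hunks below, the reworked function first works out what actually needs restoring and leaves the MSR untouched when the answer is nothing:

	void notrace restore_math(struct pt_regs *regs)	/* condensed */
	{
		unsigned long msr = regs->msr;
		unsigned long new_msr = 0;

		/* Decide first: a bit already set in the user MSR means those
		 * registers are live, so only missing facilities are counted. */
		if (!(msr & MSR_FP) && should_restore_fp())
			new_msr |= MSR_FP | current->thread.fpexc_mode;
		if (!(msr & MSR_VEC) && should_restore_altivec())
			new_msr |= MSR_VEC;
		if (!(msr & MSR_VSX) && should_restore_vsx() &&
		    ((msr | new_msr) & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC))
			new_msr |= MSR_VSX;

		if (!new_msr)
			return;	/* fast path: no MSR writes at all */

		msr_check_and_set(new_msr);
		if (new_msr & MSR_FP)
			do_restore_fp();
		if (new_msr & MSR_VEC)
			do_restore_altivec();
		if (new_msr & MSR_VSX)
			do_restore_vsx();
		msr_check_and_clear(new_msr);
		regs->msr |= new_msr;
	}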
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200623234139.2262227-2-npiggin@gmail.com --- arch/powerpc/kernel/process.c | 100 +++++++++++++++++++++++---------------- arch/powerpc/kernel/syscall_64.c | 8 ++++ 2 files changed, 68 insertions(+), 40 deletions(-) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 02d691bda9b6..9be4759f3bbb 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -471,49 +471,58 @@ EXPORT_SYMBOL(giveup_all); #ifdef CONFIG_PPC_BOOK3S_64 #ifdef CONFIG_PPC_FPU -static int restore_fp(struct task_struct *tsk) +static bool should_restore_fp(void) { - if (tsk->thread.load_fp) { - load_fp_state(¤t->thread.fp_state); + if (current->thread.load_fp) { current->thread.load_fp++; - return 1; + return true; } - return 0; + return false; +} + +static void do_restore_fp(void) +{ + load_fp_state(¤t->thread.fp_state); } #else -static int restore_fp(struct task_struct *tsk) { return 0; } +static bool should_restore_fp(void) { return false; } +static void do_restore_fp(void) { } #endif /* CONFIG_PPC_FPU */ #ifdef CONFIG_ALTIVEC -#define loadvec(thr) ((thr).load_vec) -static int restore_altivec(struct task_struct *tsk) +static bool should_restore_altivec(void) { - if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) { - load_vr_state(&tsk->thread.vr_state); - tsk->thread.used_vr = 1; - tsk->thread.load_vec++; - - return 1; + if (cpu_has_feature(CPU_FTR_ALTIVEC) && (current->thread.load_vec)) { + current->thread.load_vec++; + return true; } - return 0; + return false; +} + +static void do_restore_altivec(void) +{ + load_vr_state(¤t->thread.vr_state); + current->thread.used_vr = 1; } #else -#define loadvec(thr) 0 -static inline int restore_altivec(struct task_struct *tsk) { return 0; } +static bool should_restore_altivec(void) { return false; } +static void do_restore_altivec(void) { } #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX -static int restore_vsx(struct task_struct *tsk) +static bool should_restore_vsx(void) { - if (cpu_has_feature(CPU_FTR_VSX)) { - tsk->thread.used_vsr = 1; - return 1; - } - - return 0; + if (cpu_has_feature(CPU_FTR_VSX)) + return true; + return false; +} +static void do_restore_vsx(void) +{ + current->thread.used_vsr = 1; } #else -static inline int restore_vsx(struct task_struct *tsk) { return 0; } +static bool should_restore_vsx(void) { return false; } +static void do_restore_vsx(void) { } #endif /* CONFIG_VSX */ /* @@ -529,31 +538,42 @@ static inline int restore_vsx(struct task_struct *tsk) { return 0; } void notrace restore_math(struct pt_regs *regs) { unsigned long msr; - - if (!current->thread.load_fp && !loadvec(current->thread)) - return; + unsigned long new_msr = 0; msr = regs->msr; - msr_check_and_set(msr_all_available); /* - * Only reload if the bit is not set in the user MSR, the bit BEING set - * indicates that the registers are hot + * new_msr tracks the facilities that are to be restored. Only reload + * if the bit is not set in the user MSR (if it is set, the registers + * are live for the user thread). 
*/ - if ((!(msr & MSR_FP)) && restore_fp(current)) - msr |= MSR_FP | current->thread.fpexc_mode; + if ((!(msr & MSR_FP)) && should_restore_fp()) + new_msr |= MSR_FP | current->thread.fpexc_mode; - if ((!(msr & MSR_VEC)) && restore_altivec(current)) - msr |= MSR_VEC; + if ((!(msr & MSR_VEC)) && should_restore_altivec()) + new_msr |= MSR_VEC; - if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) && - restore_vsx(current)) { - msr |= MSR_VSX; + if ((!(msr & MSR_VSX)) && should_restore_vsx()) { + if (((msr | new_msr) & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) + new_msr |= MSR_VSX; } - msr_check_and_clear(msr_all_available); + if (new_msr) { + msr_check_and_set(new_msr); + + if (new_msr & MSR_FP) + do_restore_fp(); + + if (new_msr & MSR_VEC) + do_restore_altivec(); - regs->msr = msr; + if (new_msr & MSR_VSX) + do_restore_vsx(); + + msr_check_and_clear(new_msr); + + regs->msr |= new_msr; + } } #endif diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c index 79edba3ab312..5126f1d3184a 100644 --- a/arch/powerpc/kernel/syscall_64.c +++ b/arch/powerpc/kernel/syscall_64.c @@ -206,6 +206,13 @@ again: else if (cpu_has_feature(CPU_FTR_ALTIVEC)) mathflags |= MSR_VEC; + /* + * If userspace MSR has all available FP bits set, + * then they are live and no need to restore. If not, + * it means the regs were given up and restore_math + * may decide to restore them (to avoid taking an FP + * fault). + */ if ((regs->msr & mathflags) != mathflags) restore_math(regs); } @@ -277,6 +284,7 @@ again: else if (cpu_has_feature(CPU_FTR_ALTIVEC)) mathflags |= MSR_VEC; + /* See above restore_math comment */ if ((regs->msr & mathflags) != mathflags) restore_math(regs); } -- cgit v1.2.3 From b2b46304e9360f3dda49c9d8ba4a1478b9eecf1d Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 24 Jun 2020 09:41:39 +1000 Subject: powerpc: re-initialise lazy FPU/VEC counters on every fault When a FP/VEC/VSX unavailable fault loads registers and enables the facility in the MSR, re-set the lazy restore counters to 1 rather than incrementing them so every fault gets the same number of restores before the next fault. This probably shouldn't be a practical change because if a lazy counter was non-zero then it should have been restored and would not cause a fault when userspace tries to access it. However the code and comment implies otherwise so that's misleading and unnecessary. 
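As a rough model of the counter lifecycle this patch intends (a hedged sketch, not kernel code; the names mirror load_fp but the functions are invented for illustration): the unavailable fault re-arms the counter to 1, each eager restore increments it, and a wrap to zero disables eager restores until the next fault.

#include <stdio.h>

/* Hypothetical model of one lazy-restore counter (e.g. load_fp). */
static unsigned char load_fp;

static void fp_unavailable_fault(void)
{
	/* After this patch: always re-arm to 1 rather than incrementing,
	 * so every fault gets the same budget of eager restores. */
	load_fp = 1;
}

static int restore_fp_eagerly(void)
{
	if (!load_fp)
		return 0;	/* counter wrapped: wait for the next fault */
	load_fp++;		/* wraps to 0 after enough eager restores */
	return 1;
}

int main(void)
{
	int restores = 0;

	fp_unavailable_fault();
	while (restore_fp_eagerly())
		restores++;

	printf("eager restores before wrap: %d\n", restores);
	return 0;
}

Re-arming to 1 on every fault gives each fault the same budget of eager restores (255 in this 8-bit model), instead of whatever value happened to be left in the counter.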
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200623234139.2262227-3-npiggin@gmail.com --- arch/powerpc/kernel/fpu.S | 4 +--- arch/powerpc/kernel/vector.S | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S index cac22cb97a8c..4ae39db70044 100644 --- a/arch/powerpc/kernel/fpu.S +++ b/arch/powerpc/kernel/fpu.S @@ -107,9 +107,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) or r12,r12,r4 std r12,_MSR(r1) #endif - /* Don't care if r4 overflows, this is desired behaviour */ - lbz r4,THREAD_LOAD_FP(r5) - addi r4,r4,1 + li r4,1 stb r4,THREAD_LOAD_FP(r5) addi r10,r5,THREAD_FPSTATE lfd fr0,FPSTATE_FPSCR(r10) diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S index efc5b52f95d2..801dc28fdcca 100644 --- a/arch/powerpc/kernel/vector.S +++ b/arch/powerpc/kernel/vector.S @@ -76,9 +76,7 @@ _GLOBAL(load_up_altivec) oris r12,r12,MSR_VEC@h std r12,_MSR(r1) #endif - /* Don't care if r4 overflows, this is desired behaviour */ - lbz r4,THREAD_LOAD_VEC(r5) - addi r4,r4,1 + li r4,1 stb r4,THREAD_LOAD_VEC(r5) addi r6,r5,THREAD_VRSTATE li r4,1 -- cgit v1.2.3 From 1026798c644bfd3115fc4e32fd5e767cfc30ccf1 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 9 Jun 2020 17:06:04 +1000 Subject: powerpc/security: re-name count cache flush to branch cache flush The count cache flush mostly refers to both count cache and link stack flushing. As a first step to untangling these a bit, re-name the bits that apply to both. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200609070610.846703-2-npiggin@gmail.com --- arch/powerpc/include/asm/asm-prototypes.h | 4 ++-- arch/powerpc/kernel/entry_64.S | 7 +++--- arch/powerpc/kernel/security.c | 36 +++++++++++++++---------------- 3 files changed, 23 insertions(+), 24 deletions(-) diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 7d81e86a1e5d..fa9057360e88 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -144,13 +144,13 @@ void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr); void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr); /* Patch sites */ -extern s32 patch__call_flush_count_cache; +extern s32 patch__call_flush_branch_caches; extern s32 patch__flush_count_cache_return; extern s32 patch__flush_link_stack_return; extern s32 patch__call_kvm_flush_link_stack; extern s32 patch__memset_nocache, patch__memcpy_nocache; -extern long flush_count_cache; +extern long flush_branch_caches; extern long kvm_flush_link_stack; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 1013adc9acfb..bdef9c11bad2 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -259,8 +259,7 @@ _ASM_NOKPROBE_SYMBOL(save_nvgprs); #define FLUSH_COUNT_CACHE \ 1: nop; \ - patch_site 1b, patch__call_flush_count_cache - + patch_site 1b, patch__call_flush_branch_caches #define BCCTR_FLUSH .long 0x4c400420 @@ -271,8 +270,8 @@ _ASM_NOKPROBE_SYMBOL(save_nvgprs); .endm .balign 32 -.global flush_count_cache -flush_count_cache: +.global flush_branch_caches +flush_branch_caches: /* Save LR into r9 */ mflr r9 diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index d86701ce116b..df2a3eff950b 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -21,12 +21,12 @@ 
u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; -enum count_cache_flush_type { - COUNT_CACHE_FLUSH_NONE = 0x1, - COUNT_CACHE_FLUSH_SW = 0x2, - COUNT_CACHE_FLUSH_HW = 0x4, +enum branch_cache_flush_type { + BRANCH_CACHE_FLUSH_NONE = 0x1, + BRANCH_CACHE_FLUSH_SW = 0x2, + BRANCH_CACHE_FLUSH_HW = 0x4, }; -static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; +static enum branch_cache_flush_type count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE; static bool link_stack_flush_enabled; bool barrier_nospec_enabled; @@ -222,10 +222,10 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c if (link_stack_flush_enabled) seq_buf_printf(&s, ", Software link stack flush"); - } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) { + } else if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) { seq_buf_printf(&s, "Mitigation: Software count cache flush"); - if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW) + if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) seq_buf_printf(&s, " (hardware accelerated)"); if (link_stack_flush_enabled) @@ -429,18 +429,18 @@ device_initcall(stf_barrier_debugfs_init); static void no_count_cache_flush(void) { - count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; + count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE; pr_info("count-cache-flush: software flush disabled.\n"); } -static void toggle_count_cache_flush(bool enable) +static void toggle_branch_cache_flush(bool enable) { if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) && !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) enable = false; if (!enable) { - patch_instruction_site(&patch__call_flush_count_cache, + patch_instruction_site(&patch__call_flush_branch_caches, ppc_inst(PPC_INST_NOP)); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE patch_instruction_site(&patch__call_kvm_flush_link_stack, @@ -452,9 +452,9 @@ static void toggle_count_cache_flush(bool enable) return; } - // This enables the branch from _switch to flush_count_cache - patch_branch_site(&patch__call_flush_count_cache, - (u64)&flush_count_cache, BRANCH_SET_LINK); + // This enables the branch from _switch to flush_branch_caches + patch_branch_site(&patch__call_flush_branch_caches, + (u64)&flush_branch_caches, BRANCH_SET_LINK); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE // This enables the branch from guest_exit_cont to kvm_flush_link_stack @@ -474,13 +474,13 @@ static void toggle_count_cache_flush(bool enable) } if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) { - count_cache_flush_type = COUNT_CACHE_FLUSH_SW; + count_cache_flush_type = BRANCH_CACHE_FLUSH_SW; pr_info("count-cache-flush: full software flush sequence enabled.\n"); return; } patch_instruction_site(&patch__flush_count_cache_return, ppc_inst(PPC_INST_BLR)); - count_cache_flush_type = COUNT_CACHE_FLUSH_HW; + count_cache_flush_type = BRANCH_CACHE_FLUSH_HW; pr_info("count-cache-flush: hardware assisted flush sequence enabled\n"); } @@ -505,7 +505,7 @@ void setup_count_cache_flush(void) security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) security_ftr_set(SEC_FTR_FLUSH_LINK_STACK); - toggle_count_cache_flush(enable); + toggle_branch_cache_flush(enable); } #ifdef CONFIG_DEBUG_FS @@ -520,14 +520,14 @@ static int count_cache_flush_set(void *data, u64 val) else return -EINVAL; - toggle_count_cache_flush(enable); + toggle_branch_cache_flush(enable); return 0; } static int count_cache_flush_get(void *data, u64 *val) { - if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE) + if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) *val = 0; 
else *val = 1; -- cgit v1.2.3 From c06ac2771070f465076e87bba262c64fb0b3aca3 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 9 Jun 2020 17:06:05 +1000 Subject: powerpc/security: change link stack flush state to the flush type enum Prepare to allow for hardware link stack flushing by using the none/sw/hw type, same as the count cache state. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200609070610.846703-3-npiggin@gmail.com --- arch/powerpc/kernel/security.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index df2a3eff950b..28f4cb062f69 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -27,7 +27,7 @@ enum branch_cache_flush_type { BRANCH_CACHE_FLUSH_HW = 0x4, }; static enum branch_cache_flush_type count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE; -static bool link_stack_flush_enabled; +static enum branch_cache_flush_type link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE; bool barrier_nospec_enabled; static bool no_nospec; @@ -219,7 +219,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c if (ccd) seq_buf_printf(&s, "Indirect branch cache disabled"); - if (link_stack_flush_enabled) + if (link_stack_flush_type == BRANCH_CACHE_FLUSH_SW) seq_buf_printf(&s, ", Software link stack flush"); } else if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) { @@ -228,7 +228,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) seq_buf_printf(&s, " (hardware accelerated)"); - if (link_stack_flush_enabled) + if (link_stack_flush_type == BRANCH_CACHE_FLUSH_SW) seq_buf_printf(&s, ", Software link stack flush"); } else if (btb_flush_enabled) { @@ -447,7 +447,7 @@ static void toggle_branch_cache_flush(bool enable) ppc_inst(PPC_INST_NOP)); #endif pr_info("link-stack-flush: software flush disabled.\n"); - link_stack_flush_enabled = false; + link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE; no_count_cache_flush(); return; } @@ -463,7 +463,7 @@ static void toggle_branch_cache_flush(bool enable) #endif pr_info("link-stack-flush: software flush enabled.\n"); - link_stack_flush_enabled = true; + link_stack_flush_type = BRANCH_CACHE_FLUSH_SW; // If we just need to flush the link stack, patch an early return if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { -- cgit v1.2.3 From 1afe00c74ffe6d502bffa81c7d849cb4640d7ae5 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 9 Jun 2020 17:06:06 +1000 Subject: powerpc/security: make display of branch cache flush more consistent Make the count-cache and link-stack messages look the same Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200609070610.846703-4-npiggin@gmail.com --- arch/powerpc/kernel/security.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index 28f4cb062f69..659ef6a92bb9 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -430,7 +430,7 @@ device_initcall(stf_barrier_debugfs_init); static void no_count_cache_flush(void) { count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE; - pr_info("count-cache-flush: software flush disabled.\n"); + pr_info("count-cache-flush: flush disabled.\n"); } static void toggle_branch_cache_flush(bool enable) @@ -446,7 +446,7 @@ static void 
toggle_branch_cache_flush(bool enable) patch_instruction_site(&patch__call_kvm_flush_link_stack, ppc_inst(PPC_INST_NOP)); #endif - pr_info("link-stack-flush: software flush disabled.\n"); + pr_info("link-stack-flush: flush disabled.\n"); link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE; no_count_cache_flush(); return; @@ -475,13 +475,13 @@ static void toggle_branch_cache_flush(bool enable) if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) { count_cache_flush_type = BRANCH_CACHE_FLUSH_SW; - pr_info("count-cache-flush: full software flush sequence enabled.\n"); + pr_info("count-cache-flush: software flush enabled.\n"); return; } patch_instruction_site(&patch__flush_count_cache_return, ppc_inst(PPC_INST_BLR)); count_cache_flush_type = BRANCH_CACHE_FLUSH_HW; - pr_info("count-cache-flush: hardware assisted flush sequence enabled\n"); + pr_info("count-cache-flush: hardware flush enabled.\n"); } void setup_count_cache_flush(void) -- cgit v1.2.3 From c0036549a9d9a060fa8bc24e31f85503ce08ad5e Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 9 Jun 2020 17:06:07 +1000 Subject: powerpc/security: split branch cache flush toggle from code patching Branch cache flushing code patching has inter-dependencies on both the link stack and the count cache flushing state. To make the code clearer and to separate the link stack and count cache handling, split the "toggle" (setting up variables and printing enable/disable) from the code patching. Signed-off-by: Nicholas Piggin [mpe: Always print something, even if the flush is disabled] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200609070610.846703-5-npiggin@gmail.com --- arch/powerpc/kernel/security.c | 94 +++++++++++++++++++++++------------------- 1 file changed, 51 insertions(+), 43 deletions(-) diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index 659ef6a92bb9..05eeb22b3b97 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -427,61 +427,69 @@ static __init int stf_barrier_debugfs_init(void) device_initcall(stf_barrier_debugfs_init); #endif /* CONFIG_DEBUG_FS */ -static void no_count_cache_flush(void) +static void update_branch_cache_flush(void) { - count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE; - pr_info("count-cache-flush: flush disabled.\n"); -} - -static void toggle_branch_cache_flush(bool enable) -{ - if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) && - !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) - enable = false; - - if (!enable) { - patch_instruction_site(&patch__call_flush_branch_caches, - ppc_inst(PPC_INST_NOP)); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + // This controls the branch from guest_exit_cont to kvm_flush_link_stack + if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) { patch_instruction_site(&patch__call_kvm_flush_link_stack, ppc_inst(PPC_INST_NOP)); -#endif - pr_info("link-stack-flush: flush disabled.\n"); - link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE; - no_count_cache_flush(); - return; + } else { + patch_branch_site(&patch__call_kvm_flush_link_stack, + (u64)&kvm_flush_link_stack, BRANCH_SET_LINK); } - - // This enables the branch from _switch to flush_branch_caches - patch_branch_site(&patch__call_flush_branch_caches, - (u64)&flush_branch_caches, BRANCH_SET_LINK); - -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - // This enables the branch from guest_exit_cont to kvm_flush_link_stack - patch_branch_site(&patch__call_kvm_flush_link_stack, - (u64)&kvm_flush_link_stack, BRANCH_SET_LINK); #endif - pr_info("link-stack-flush: software flush 
enabled.\n"); - link_stack_flush_type = BRANCH_CACHE_FLUSH_SW; + // This controls the branch from _switch to flush_branch_caches + if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE && + link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) { + patch_instruction_site(&patch__call_flush_branch_caches, + ppc_inst(PPC_INST_NOP)); + } else { + patch_branch_site(&patch__call_flush_branch_caches, + (u64)&flush_branch_caches, BRANCH_SET_LINK); + + // If we just need to flush the link stack, early return + if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) { + patch_instruction_site(&patch__flush_link_stack_return, + ppc_inst(PPC_INST_BLR)); + + // If we have flush instruction, early return + } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) { + patch_instruction_site(&patch__flush_count_cache_return, + ppc_inst(PPC_INST_BLR)); + } + } +} + +static void toggle_branch_cache_flush(bool enable) +{ + if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { + if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) + count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE; - // If we just need to flush the link stack, patch an early return - if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { - patch_instruction_site(&patch__flush_link_stack_return, - ppc_inst(PPC_INST_BLR)); - no_count_cache_flush(); - return; + pr_info("count-cache-flush: flush disabled.\n"); + } else { + if (security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) { + count_cache_flush_type = BRANCH_CACHE_FLUSH_HW; + pr_info("count-cache-flush: hardware flush enabled.\n"); + } else { + count_cache_flush_type = BRANCH_CACHE_FLUSH_SW; + pr_info("count-cache-flush: software flush enabled.\n"); + } } - if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) { - count_cache_flush_type = BRANCH_CACHE_FLUSH_SW; - pr_info("count-cache-flush: software flush enabled.\n"); - return; + if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) { + if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE) + link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE; + + pr_info("link-stack-flush: flush disabled.\n"); + } else { + link_stack_flush_type = BRANCH_CACHE_FLUSH_SW; + pr_info("link-stack-flush: software flush enabled.\n"); } - patch_instruction_site(&patch__flush_count_cache_return, ppc_inst(PPC_INST_BLR)); - count_cache_flush_type = BRANCH_CACHE_FLUSH_HW; - pr_info("count-cache-flush: hardware flush enabled.\n"); + update_branch_cache_flush(); } void setup_count_cache_flush(void) -- cgit v1.2.3 From 70d7cdaf0548ec95fa7204dcdd39cd8e63cee24d Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 9 Jun 2020 17:06:08 +1000 Subject: powerpc/64s: Move branch cache flushing bcctr variant to ppc-ops.h Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200609070610.846703-6-npiggin@gmail.com --- arch/powerpc/include/asm/ppc-opcode.h | 2 ++ arch/powerpc/kernel/entry_64.S | 6 ++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 777d5056a71c..3ee6fad902bc 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -195,6 +195,7 @@ #define OP_LQ 56 /* sorted alphabetically */ +#define PPC_INST_BCCTR_FLUSH 0x4c400420 #define PPC_INST_BHRBE 0x7c00025c #define PPC_INST_CLRBHRB 0x7c00035c #define PPC_INST_COPY 0x7c20060c @@ -437,6 +438,7 @@ #endif /* Deal with instructions that older assemblers aren't aware of */ +#define PPC_BCCTR_FLUSH stringify_in_c(.long 
PPC_INST_BCCTR_FLUSH) #define PPC_CP_ABORT stringify_in_c(.long PPC_INST_CP_ABORT) #define PPC_COPY(a, b) stringify_in_c(.long PPC_INST_COPY | \ ___PPC_RA(a) | ___PPC_RB(b)) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index bdef9c11bad2..da85c2511e57 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -261,8 +261,6 @@ _ASM_NOKPROBE_SYMBOL(save_nvgprs); 1: nop; \ patch_site 1b, patch__call_flush_branch_caches -#define BCCTR_FLUSH .long 0x4c400420 - .macro nops number .rept \number nop @@ -293,7 +291,7 @@ flush_branch_caches: li r9,0x7fff mtctr r9 - BCCTR_FLUSH + PPC_BCCTR_FLUSH 2: nop patch_site 2b patch__flush_count_cache_return @@ -302,7 +300,7 @@ flush_branch_caches: .rept 278 .balign 32 - BCCTR_FLUSH + PPC_BCCTR_FLUSH nops 7 .endr -- cgit v1.2.3 From 4d24e21cc694e7253a532fe5a9bde12b284f1317 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 9 Jun 2020 17:06:09 +1000 Subject: powerpc/security: Allow for processors that flush the link stack using the special bcctr If both count cache and link stack are to be flushed, and can be flushed with the special bcctr, patch that in directly to the flush/branch nop site. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200609070610.846703-7-npiggin@gmail.com --- arch/powerpc/include/asm/security_features.h | 2 ++ arch/powerpc/kernel/security.c | 27 +++++++++++++++++++-------- 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h index 7c05e95a5c44..fbb8fa32150f 100644 --- a/arch/powerpc/include/asm/security_features.h +++ b/arch/powerpc/include/asm/security_features.h @@ -63,6 +63,8 @@ static inline bool security_ftr_enabled(u64 feature) // bcctr 2,0,0 triggers a hardware assisted count cache flush #define SEC_FTR_BCCTR_FLUSH_ASSIST 0x0000000000000800ull +// bcctr 2,0,0 triggers a hardware assisted link stack flush +#define SEC_FTR_BCCTR_LINK_FLUSH_ASSIST 0x0000000000002000ull // Features indicating need for Spectre/Meltdown mitigations diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index 05eeb22b3b97..c9876aab3142 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -219,24 +219,25 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c if (ccd) seq_buf_printf(&s, "Indirect branch cache disabled"); - if (link_stack_flush_type == BRANCH_CACHE_FLUSH_SW) - seq_buf_printf(&s, ", Software link stack flush"); - } else if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) { seq_buf_printf(&s, "Mitigation: Software count cache flush"); if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) seq_buf_printf(&s, " (hardware accelerated)"); - if (link_stack_flush_type == BRANCH_CACHE_FLUSH_SW) - seq_buf_printf(&s, ", Software link stack flush"); - } else if (btb_flush_enabled) { seq_buf_printf(&s, "Mitigation: Branch predictor state flush"); } else { seq_buf_printf(&s, "Vulnerable"); } + if (bcs || ccd || count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) { + if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE) + seq_buf_printf(&s, ", Software link stack flush"); + if (link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) + seq_buf_printf(&s, " (hardware accelerated)"); + } + seq_buf_printf(&s, "\n"); return s.len; @@ -435,6 +436,7 @@ static void update_branch_cache_flush(void) patch_instruction_site(&patch__call_kvm_flush_link_stack, ppc_inst(PPC_INST_NOP)); } 
else { + // Could use HW flush, but that could also flush count cache patch_branch_site(&patch__call_kvm_flush_link_stack, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK); } @@ -445,6 +447,10 @@ static void update_branch_cache_flush(void) link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) { patch_instruction_site(&patch__call_flush_branch_caches, ppc_inst(PPC_INST_NOP)); + } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW && + link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) { + patch_instruction_site(&patch__call_flush_branch_caches, + ppc_inst(PPC_INST_BCCTR_FLUSH)); } else { patch_branch_site(&patch__call_flush_branch_caches, (u64)&flush_branch_caches, BRANCH_SET_LINK); @@ -485,8 +491,13 @@ static void toggle_branch_cache_flush(bool enable) pr_info("link-stack-flush: flush disabled.\n"); } else { - link_stack_flush_type = BRANCH_CACHE_FLUSH_SW; - pr_info("link-stack-flush: software flush enabled.\n"); + if (security_ftr_enabled(SEC_FTR_BCCTR_LINK_FLUSH_ASSIST)) { + link_stack_flush_type = BRANCH_CACHE_FLUSH_HW; + pr_info("link-stack-flush: hardware flush enabled.\n"); + } else { + link_stack_flush_type = BRANCH_CACHE_FLUSH_SW; + pr_info("link-stack-flush: software flush enabled.\n"); + } } update_branch_cache_flush(); -- cgit v1.2.3 From 48f6e7f6d948b56489da027bc3284c709b939d28 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 12 Jun 2020 00:12:21 -0500 Subject: powerpc/pseries: remove cede offline state for CPUs This effectively reverts commit 3aa565f53c39 ("powerpc/pseries: Add hooks to put the CPU into an appropriate offline state"), which added an offline mode for CPUs which uses the H_CEDE hcall instead of the architected stop-self RTAS function in order to facilitate "folding" of dedicated mode processors on PowerVM platforms to achieve energy savings. This has been the default offline mode since its introduction. There's nothing about stop-self that would prevent the hypervisor from achieving the energy savings available via H_CEDE, so the original premise of this change appears to be flawed. I also have encountered the claim that the transition to and from ceded state is much faster than stop-self/start-cpu. Certainly we would not want to use stop-self as an *idle* mode. That is what H_CEDE is for. However, this difference is insignificant in the context of Linux CPU hotplug, where the latency of an offline or online operation on current systems is on the order of 100ms, mainly attributable to all the various subsystems' cpuhp callbacks. The cede offline mode also prevents accurate accounting, as discussed before: https://lore.kernel.org/linuxppc-dev/1571740391-3251-1-git-send-email-ego@linux.vnet.ibm.com/ Unconditionally use stop-self to offline processor threads. This is the architected method for offlining CPUs on PAPR systems. The "cede_offline" boot parameter is rendered obsolete. Removing this code enables the removal of the partition suspend code which temporarily onlines all present CPUs. Fixes: 3aa565f53c39 ("powerpc/pseries: Add hooks to put the CPU into an appropriate offline state") Signed-off-by: Nathan Lynch Reviewed-by: Gautham R. 
Shenoy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612051238.1007764-2-nathanl@linux.ibm.com --- Documentation/core-api/cpu_hotplug.rst | 7 - arch/powerpc/platforms/pseries/hotplug-cpu.c | 170 ++---------------------- arch/powerpc/platforms/pseries/offline_states.h | 38 ------ arch/powerpc/platforms/pseries/pmem.c | 1 - arch/powerpc/platforms/pseries/smp.c | 28 +--- 5 files changed, 15 insertions(+), 229 deletions(-) delete mode 100644 arch/powerpc/platforms/pseries/offline_states.h diff --git a/Documentation/core-api/cpu_hotplug.rst b/Documentation/core-api/cpu_hotplug.rst index 4a50ab7817f7..b1ae1ac159cf 100644 --- a/Documentation/core-api/cpu_hotplug.rst +++ b/Documentation/core-api/cpu_hotplug.rst @@ -50,13 +50,6 @@ Command Line Switches This option is limited to the X86 and S390 architecture. -``cede_offline={"off","on"}`` - Use this option to disable/enable putting offlined processors to an extended - ``H_CEDE`` state on supported pseries platforms. If nothing is specified, - ``cede_offline`` is set to "on". - - This option is limited to the PowerPC architecture. - ``cpu0_hotplug`` Allow to shutdown CPU0. diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 3e8cbfe7a80f..d4b346355bb9 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -35,54 +35,10 @@ #include #include "pseries.h" -#include "offline_states.h" /* This version can't take the spinlock, because it never returns */ static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE; -static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) = - CPU_STATE_OFFLINE; -static DEFINE_PER_CPU(enum cpu_state_vals, current_state) = CPU_STATE_OFFLINE; - -static enum cpu_state_vals default_offline_state = CPU_STATE_OFFLINE; - -static bool cede_offline_enabled __read_mostly = true; - -/* - * Enable/disable cede_offline when available. 
- */ -static int __init setup_cede_offline(char *str) -{ - return (kstrtobool(str, &cede_offline_enabled) == 0); -} - -__setup("cede_offline=", setup_cede_offline); - -enum cpu_state_vals get_cpu_current_state(int cpu) -{ - return per_cpu(current_state, cpu); -} - -void set_cpu_current_state(int cpu, enum cpu_state_vals state) -{ - per_cpu(current_state, cpu) = state; -} - -enum cpu_state_vals get_preferred_offline_state(int cpu) -{ - return per_cpu(preferred_offline_state, cpu); -} - -void set_preferred_offline_state(int cpu, enum cpu_state_vals state) -{ - per_cpu(preferred_offline_state, cpu) = state; -} - -void set_default_offline_state(int cpu) -{ - per_cpu(preferred_offline_state, cpu) = default_offline_state; -} - static void rtas_stop_self(void) { static struct rtas_args args; @@ -101,9 +57,7 @@ static void rtas_stop_self(void) static void pseries_mach_cpu_die(void) { - unsigned int cpu = smp_processor_id(); unsigned int hwcpu = hard_smp_processor_id(); - u8 cede_latency_hint = 0; local_irq_disable(); idle_task_exit(); @@ -112,49 +66,6 @@ static void pseries_mach_cpu_die(void) else xics_teardown_cpu(); - if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) { - set_cpu_current_state(cpu, CPU_STATE_INACTIVE); - if (ppc_md.suspend_disable_cpu) - ppc_md.suspend_disable_cpu(); - - cede_latency_hint = 2; - - get_lppaca()->idle = 1; - if (!lppaca_shared_proc(get_lppaca())) - get_lppaca()->donate_dedicated_cpu = 1; - - while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) { - while (!prep_irq_for_idle()) { - local_irq_enable(); - local_irq_disable(); - } - - extended_cede_processor(cede_latency_hint); - } - - local_irq_disable(); - - if (!lppaca_shared_proc(get_lppaca())) - get_lppaca()->donate_dedicated_cpu = 0; - get_lppaca()->idle = 0; - - if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) { - unregister_slb_shadow(hwcpu); - - hard_irq_disable(); - /* - * Call to start_secondary_resume() will not return. - * Kernel stack will be reset and start_secondary() - * will be called to continue the online operation. 
- */ - start_secondary_resume(); - } - } - - /* Requested state is CPU_STATE_OFFLINE at this point */ - WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE); - - set_cpu_current_state(cpu, CPU_STATE_OFFLINE); unregister_slb_shadow(hwcpu); rtas_stop_self(); @@ -200,24 +111,13 @@ static void pseries_cpu_die(unsigned int cpu) int cpu_status = 1; unsigned int pcpu = get_hard_smp_processor_id(cpu); - if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) { - cpu_status = 1; - for (tries = 0; tries < 5000; tries++) { - if (get_cpu_current_state(cpu) == CPU_STATE_INACTIVE) { - cpu_status = 0; - break; - } - msleep(1); - } - } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) { + for (tries = 0; tries < 25; tries++) { + cpu_status = smp_query_cpu_stopped(pcpu); + if (cpu_status == QCSS_STOPPED || + cpu_status == QCSS_HARDWARE_ERROR) + break; + cpu_relax(); - for (tries = 0; tries < 25; tries++) { - cpu_status = smp_query_cpu_stopped(pcpu); - if (cpu_status == QCSS_STOPPED || - cpu_status == QCSS_HARDWARE_ERROR) - break; - cpu_relax(); - } } if (cpu_status != 0) { @@ -359,28 +259,15 @@ static int dlpar_offline_cpu(struct device_node *dn) if (get_hard_smp_processor_id(cpu) != thread) continue; - if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE) + if (!cpu_online(cpu)) break; - if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) { - set_preferred_offline_state(cpu, - CPU_STATE_OFFLINE); - cpu_maps_update_done(); - timed_topology_update(1); - rc = device_offline(get_cpu_device(cpu)); - if (rc) - goto out; - cpu_maps_update_begin(); - break; - } - - /* - * The cpu is in CPU_STATE_INACTIVE. - * Upgrade it's state to CPU_STATE_OFFLINE. - */ - set_preferred_offline_state(cpu, CPU_STATE_OFFLINE); - WARN_ON(plpar_hcall_norets(H_PROD, thread) != H_SUCCESS); - __cpu_die(cpu); + cpu_maps_update_done(); + timed_topology_update(1); + rc = device_offline(get_cpu_device(cpu)); + if (rc) + goto out; + cpu_maps_update_begin(); break; } if (cpu == num_possible_cpus()) { @@ -414,8 +301,6 @@ static int dlpar_online_cpu(struct device_node *dn) for_each_present_cpu(cpu) { if (get_hard_smp_processor_id(cpu) != thread) continue; - BUG_ON(get_cpu_current_state(cpu) - != CPU_STATE_OFFLINE); cpu_maps_update_done(); timed_topology_update(1); find_and_online_cpu_nid(cpu); @@ -1013,27 +898,8 @@ static struct notifier_block pseries_smp_nb = { .notifier_call = pseries_smp_notifier, }; -#define MAX_CEDE_LATENCY_LEVELS 4 -#define CEDE_LATENCY_PARAM_LENGTH 10 -#define CEDE_LATENCY_PARAM_MAX_LENGTH \ - (MAX_CEDE_LATENCY_LEVELS * CEDE_LATENCY_PARAM_LENGTH * sizeof(char)) -#define CEDE_LATENCY_TOKEN 45 - -static char cede_parameters[CEDE_LATENCY_PARAM_MAX_LENGTH]; - -static int parse_cede_parameters(void) -{ - memset(cede_parameters, 0, CEDE_LATENCY_PARAM_MAX_LENGTH); - return rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, - NULL, - CEDE_LATENCY_TOKEN, - __pa(cede_parameters), - CEDE_LATENCY_PARAM_MAX_LENGTH); -} - static int __init pseries_cpu_hotplug_init(void) { - int cpu; int qcss_tok; #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE @@ -1056,16 +922,8 @@ static int __init pseries_cpu_hotplug_init(void) smp_ops->cpu_die = pseries_cpu_die; /* Processors can be added/removed only on LPAR */ - if (firmware_has_feature(FW_FEATURE_LPAR)) { + if (firmware_has_feature(FW_FEATURE_LPAR)) of_reconfig_notifier_register(&pseries_smp_nb); - cpu_maps_update_begin(); - if (cede_offline_enabled && parse_cede_parameters() == 0) { - default_offline_state = CPU_STATE_INACTIVE; - for_each_online_cpu(cpu) - 
set_default_offline_state(cpu); - } - cpu_maps_update_done(); - } return 0; } diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h deleted file mode 100644 index 51414aee2862..000000000000 --- a/arch/powerpc/platforms/pseries/offline_states.h +++ /dev/null @@ -1,38 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _OFFLINE_STATES_H_ -#define _OFFLINE_STATES_H_ - -/* Cpu offline states go here */ -enum cpu_state_vals { - CPU_STATE_OFFLINE, - CPU_STATE_INACTIVE, - CPU_STATE_ONLINE, - CPU_MAX_OFFLINE_STATES -}; - -#ifdef CONFIG_HOTPLUG_CPU -extern enum cpu_state_vals get_cpu_current_state(int cpu); -extern void set_cpu_current_state(int cpu, enum cpu_state_vals state); -extern void set_preferred_offline_state(int cpu, enum cpu_state_vals state); -extern void set_default_offline_state(int cpu); -#else -static inline enum cpu_state_vals get_cpu_current_state(int cpu) -{ - return CPU_STATE_ONLINE; -} - -static inline void set_cpu_current_state(int cpu, enum cpu_state_vals state) -{ -} - -static inline void set_preferred_offline_state(int cpu, enum cpu_state_vals state) -{ -} - -static inline void set_default_offline_state(int cpu) -{ -} -#endif - -extern enum cpu_state_vals get_preferred_offline_state(int cpu); -#endif diff --git a/arch/powerpc/platforms/pseries/pmem.c b/arch/powerpc/platforms/pseries/pmem.c index 2347e1038f58..e1dc5d3254df 100644 --- a/arch/powerpc/platforms/pseries/pmem.c +++ b/arch/powerpc/platforms/pseries/pmem.c @@ -24,7 +24,6 @@ #include #include "pseries.h" -#include "offline_states.h" static struct device_node *pmem_node; diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 6891710833be..7ebacac03dc3 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -44,8 +44,6 @@ #include #include "pseries.h" -#include "offline_states.h" - /* * The Primary thread of each non-boot processor was started from the OF client @@ -108,10 +106,7 @@ static inline int smp_startup_cpu(unsigned int lcpu) /* Fixup atomic count: it exited inside IRQ handler. */ task_thread_info(paca_ptrs[lcpu]->__current)->preempt_count = 0; -#ifdef CONFIG_HOTPLUG_CPU - if (get_cpu_current_state(lcpu) == CPU_STATE_INACTIVE) - goto out; -#endif + /* * If the RTAS start-cpu token does not exist then presume the * cpu is already spinning. 
@@ -126,9 +121,6 @@ static inline int smp_startup_cpu(unsigned int lcpu) return 0; } -#ifdef CONFIG_HOTPLUG_CPU -out: -#endif return 1; } @@ -143,10 +135,6 @@ static void smp_setup_cpu(int cpu) vpa_init(cpu); cpumask_clear_cpu(cpu, of_spin_mask); -#ifdef CONFIG_HOTPLUG_CPU - set_cpu_current_state(cpu, CPU_STATE_ONLINE); - set_default_offline_state(cpu); -#endif } static int smp_pSeries_kick_cpu(int nr) @@ -163,20 +151,6 @@ static int smp_pSeries_kick_cpu(int nr) * the processor will continue on to secondary_start */ paca_ptrs[nr]->cpu_start = 1; -#ifdef CONFIG_HOTPLUG_CPU - set_preferred_offline_state(nr, CPU_STATE_ONLINE); - - if (get_cpu_current_state(nr) == CPU_STATE_INACTIVE) { - long rc; - unsigned long hcpuid; - - hcpuid = get_hard_smp_processor_id(nr); - rc = plpar_hcall_norets(H_PROD, hcpuid); - if (rc != H_SUCCESS) - printk(KERN_ERR "Error: Prod to wake up processor %d " - "Ret= %ld\n", nr, rc); - } -#endif return 0; } -- cgit v1.2.3 From ec2fc2a9e9bbad9023aab65bc472ce7a3ca8608f Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 12 Jun 2020 00:12:22 -0500 Subject: powerpc/rtas: don't online CPUs for partition suspend Partition suspension, used for hibernation and migration, requires that the OS place all but one of the LPAR's processor threads into one of two states prior to calling the ibm,suspend-me RTAS function: * the architected offline state (via RTAS stop-self); or * the H_JOIN hcall, which does not return until the partition resumes execution Using H_CEDE as the offline mode, introduced by commit 3aa565f53c39 ("powerpc/pseries: Add hooks to put the CPU into an appropriate offline state"), means that any threads which are offline from Linux's point of view must be moved to one of those two states before a partition suspension can proceed. This was eventually addressed in commit 120496ac2d2d ("powerpc: Bring all threads online prior to migration/hibernation"), which added code to temporarily bring up any offline processor threads so they can call H_JOIN. Conceptually this is fine, but the implementation has had multiple races with cpu hotplug operations initiated from user space[1][2][3], the error handling is fragile, and it generates user-visible cpu hotplug events which is a lot of noise for a platform feature that's supposed to minimize disruption to workloads. With commit 3aa565f53c39 ("powerpc/pseries: Add hooks to put the CPU into an appropriate offline state") reverted, this code becomes unnecessary, so remove it. Since any offline CPUs now are truly offline from the platform's point of view, it is no longer necessary to bring up CPUs only to have them call H_JOIN and then go offline again upon resuming. Only active threads are required to call H_JOIN; stopped threads can be left alone. 
[1] commit a6717c01ddc2 ("powerpc/rtas: use device model APIs and serialization during LPM") [2] commit 9fb603050ffd ("powerpc/rtas: retry when cpu offline races with suspend/migration") [3] commit dfd718a2ed1f ("powerpc/rtas: Fix a potential race between CPU-Offline & Migration") Fixes: 120496ac2d2d ("powerpc: Bring all threads online prior to migration/hibernation") Signed-off-by: Nathan Lynch Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612051238.1007764-3-nathanl@linux.ibm.com --- arch/powerpc/include/asm/rtas.h | 2 - arch/powerpc/kernel/rtas.c | 122 +------------------------------ arch/powerpc/platforms/pseries/suspend.c | 22 +----- 3 files changed, 3 insertions(+), 143 deletions(-) diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 014968f25f7e..0107d724e9da 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -253,8 +253,6 @@ extern int rtas_set_indicator_fast(int indicator, int index, int new_value); extern void rtas_progress(char *s, unsigned short hex); extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data); extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data); -extern int rtas_online_cpus_mask(cpumask_var_t cpus); -extern int rtas_offline_cpus_mask(cpumask_var_t cpus); extern int rtas_ibm_suspend_me(u64 handle); struct rtc_time; diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index a09eba03f180..806d554ce357 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -843,96 +843,6 @@ static void rtas_percpu_suspend_me(void *info) __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1); } -enum rtas_cpu_state { - DOWN, - UP, -}; - -#ifndef CONFIG_SMP -static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, - cpumask_var_t cpus) -{ - if (!cpumask_empty(cpus)) { - cpumask_clear(cpus); - return -EINVAL; - } else - return 0; -} -#else -/* On return cpumask will be altered to indicate CPUs changed. - * CPUs with states changed will be set in the mask, - * CPUs with status unchanged will be unset in the mask. */ -static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, - cpumask_var_t cpus) -{ - int cpu; - int cpuret = 0; - int ret = 0; - - if (cpumask_empty(cpus)) - return 0; - - for_each_cpu(cpu, cpus) { - struct device *dev = get_cpu_device(cpu); - - switch (state) { - case DOWN: - cpuret = device_offline(dev); - break; - case UP: - cpuret = device_online(dev); - break; - } - if (cpuret < 0) { - pr_debug("%s: cpu_%s for cpu#%d returned %d.\n", - __func__, - ((state == UP) ? 
"up" : "down"), - cpu, cpuret); - if (!ret) - ret = cpuret; - if (state == UP) { - /* clear bits for unchanged cpus, return */ - cpumask_shift_right(cpus, cpus, cpu); - cpumask_shift_left(cpus, cpus, cpu); - break; - } else { - /* clear bit for unchanged cpu, continue */ - cpumask_clear_cpu(cpu, cpus); - } - } - cond_resched(); - } - - return ret; -} -#endif - -int rtas_online_cpus_mask(cpumask_var_t cpus) -{ - int ret; - - ret = rtas_cpu_state_change_mask(UP, cpus); - - if (ret) { - cpumask_var_t tmp_mask; - - if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL)) - return ret; - - /* Use tmp_mask to preserve cpus mask from first failure */ - cpumask_copy(tmp_mask, cpus); - rtas_offline_cpus_mask(tmp_mask); - free_cpumask_var(tmp_mask); - } - - return ret; -} - -int rtas_offline_cpus_mask(cpumask_var_t cpus) -{ - return rtas_cpu_state_change_mask(DOWN, cpus); -} - int rtas_ibm_suspend_me(u64 handle) { long state; @@ -940,8 +850,6 @@ int rtas_ibm_suspend_me(u64 handle) unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; struct rtas_suspend_me_data data; DECLARE_COMPLETION_ONSTACK(done); - cpumask_var_t offline_mask; - int cpuret; if (!rtas_service_present("ibm,suspend-me")) return -ENOSYS; @@ -962,9 +870,6 @@ int rtas_ibm_suspend_me(u64 handle) return -EIO; } - if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL)) - return -ENOMEM; - atomic_set(&data.working, 0); atomic_set(&data.done, 0); atomic_set(&data.error, 0); @@ -973,24 +878,8 @@ int rtas_ibm_suspend_me(u64 handle) lock_device_hotplug(); - /* All present CPUs must be online */ - cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask); - cpuret = rtas_online_cpus_mask(offline_mask); - if (cpuret) { - pr_err("%s: Could not bring present CPUs online.\n", __func__); - atomic_set(&data.error, cpuret); - goto out; - } - cpu_hotplug_disable(); - /* Check if we raced with a CPU-Offline Operation */ - if (!cpumask_equal(cpu_present_mask, cpu_online_mask)) { - pr_info("%s: Raced against a concurrent CPU-Offline\n", __func__); - atomic_set(&data.error, -EAGAIN); - goto out_hotplug_enable; - } - /* Call function on all CPUs. 
One of us will make the * rtas call */ @@ -1001,18 +890,11 @@ int rtas_ibm_suspend_me(u64 handle) if (atomic_read(&data.error) != 0) printk(KERN_ERR "Error doing global join\n"); -out_hotplug_enable: - cpu_hotplug_enable(); - /* Take down CPUs not online prior to suspend */ - cpuret = rtas_offline_cpus_mask(offline_mask); - if (cpuret) - pr_warn("%s: Could not restore CPUs to offline state.\n", - __func__); + cpu_hotplug_enable(); -out: unlock_device_hotplug(); - free_cpumask_var(offline_mask); + return atomic_read(&data.error); } diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c index 0a24a5a185f0..f789693f61f4 100644 --- a/arch/powerpc/platforms/pseries/suspend.c +++ b/arch/powerpc/platforms/pseries/suspend.c @@ -132,15 +132,11 @@ static ssize_t store_hibernate(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - cpumask_var_t offline_mask; int rc; if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL)) - return -ENOMEM; - stream_id = simple_strtoul(buf, NULL, 16); do { @@ -150,32 +146,16 @@ static ssize_t store_hibernate(struct device *dev, } while (rc == -EAGAIN); if (!rc) { - /* All present CPUs must be online */ - cpumask_andnot(offline_mask, cpu_present_mask, - cpu_online_mask); - rc = rtas_online_cpus_mask(offline_mask); - if (rc) { - pr_err("%s: Could not bring present CPUs online.\n", - __func__); - goto out; - } - stop_topology_update(); rc = pm_suspend(PM_SUSPEND_MEM); start_topology_update(); - - /* Take down CPUs not online prior to suspend */ - if (!rtas_offline_cpus_mask(offline_mask)) - pr_warn("%s: Could not restore CPUs to offline " - "state.\n", __func__); } stream_id = 0; if (!rc) rc = count; -out: - free_cpumask_var(offline_mask); + return rc; } -- cgit v1.2.3 From c30f931e891eb0a32885ecd79984e1e7366fceda Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 12 Jun 2020 00:12:23 -0500 Subject: powerpc/numa: remove ability to enable topology updates Remove the /proc/powerpc/topology_updates interface and the topology_updates=on/off command line argument. The internal topology_updates_enabled flag remains for now, but always false. Signed-off-by: Nathan Lynch Reviewed-by: Srikar Dronamraju Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612051238.1007764-4-nathanl@linux.ibm.com --- arch/powerpc/mm/numa.c | 71 +------------------------------------------------- 1 file changed, 1 insertion(+), 70 deletions(-) diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 9fcf2d195830..34d95de77bdd 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -984,27 +984,7 @@ static int __init early_numa(char *p) } early_param("numa", early_numa); -/* - * The platform can inform us through one of several mechanisms - * (post-migration device tree updates, PRRN or VPHN) that the NUMA - * assignment of a resource has changed. This controls whether we act - * on that. Disabled by default. 
- */ -static bool topology_updates_enabled; - -static int __init early_topology_updates(char *p) -{ - if (!p) - return 0; - - if (!strcmp(p, "on")) { - pr_warn("Caution: enabling topology updates\n"); - topology_updates_enabled = true; - } - - return 0; -} -early_param("topology_updates", early_topology_updates); +static const bool topology_updates_enabled; #ifdef CONFIG_MEMORY_HOTPLUG /* @@ -1632,52 +1612,6 @@ int prrn_is_enabled(void) return prrn_enabled; } -static int topology_read(struct seq_file *file, void *v) -{ - if (vphn_enabled || prrn_enabled) - seq_puts(file, "on\n"); - else - seq_puts(file, "off\n"); - - return 0; -} - -static int topology_open(struct inode *inode, struct file *file) -{ - return single_open(file, topology_read, NULL); -} - -static ssize_t topology_write(struct file *file, const char __user *buf, - size_t count, loff_t *off) -{ - char kbuf[4]; /* "on" or "off" plus null. */ - int read_len; - - read_len = count < 3 ? count : 3; - if (copy_from_user(kbuf, buf, read_len)) - return -EINVAL; - - kbuf[read_len] = '\0'; - - if (!strncmp(kbuf, "on", 2)) { - topology_updates_enabled = true; - start_topology_update(); - } else if (!strncmp(kbuf, "off", 3)) { - stop_topology_update(); - topology_updates_enabled = false; - } else - return -EINVAL; - - return count; -} - -static const struct proc_ops topology_proc_ops = { - .proc_read = seq_read, - .proc_write = topology_write, - .proc_open = topology_open, - .proc_release = single_release, -}; - static int topology_update_init(void) { start_topology_update(); @@ -1685,9 +1619,6 @@ static int topology_update_init(void) if (vphn_enabled) topology_schedule_update(); - if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_proc_ops)) - return -ENOMEM; - topology_inited = 1; return 0; } -- cgit v1.2.3 From 7d35bef96a46f7e9e167bb25258c0bd389aeab1b Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 12 Jun 2020 00:12:24 -0500 Subject: powerpc/numa: remove unreachable topology update code Since the topology_updates_enabled flag is now always false, remove it and the code which has become unreachable. This is the minimum change that prevents 'defined but unused' warnings emitted by the compiler after stubbing out the start/stop_topology_updates() functions. Signed-off-by: Nathan Lynch Reviewed-by: Srikar Dronamraju Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612051238.1007764-5-nathanl@linux.ibm.com --- arch/powerpc/mm/numa.c | 149 +------------------------------------------------ 1 file changed, 2 insertions(+), 147 deletions(-) diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 34d95de77bdd..9e20f12e6caf 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -984,8 +984,6 @@ static int __init early_numa(char *p) } early_param("numa", early_numa); -static const bool topology_updates_enabled; - #ifdef CONFIG_MEMORY_HOTPLUG /* * Find the node associated with a hot added memory section for @@ -1133,7 +1131,6 @@ struct topology_update_data { #define TOPOLOGY_DEF_TIMER_SECS 60 -static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS]; static cpumask_t cpu_associativity_changes_mask; static int vphn_enabled; static int prrn_enabled; @@ -1158,63 +1155,6 @@ int timed_topology_update(int nsecs) return 0; } -/* - * Store the current values of the associativity change counters in the - * hypervisor. 
- */ -static void setup_cpu_associativity_change_counters(void) -{ - int cpu; - - /* The VPHN feature supports a maximum of 8 reference points */ - BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8); - - for_each_possible_cpu(cpu) { - int i; - u8 *counts = vphn_cpu_change_counts[cpu]; - volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts; - - for (i = 0; i < distance_ref_points_depth; i++) - counts[i] = hypervisor_counts[i]; - } -} - -/* - * The hypervisor maintains a set of 8 associativity change counters in - * the VPA of each cpu that correspond to the associativity levels in the - * ibm,associativity-reference-points property. When an associativity - * level changes, the corresponding counter is incremented. - * - * Set a bit in cpu_associativity_changes_mask for each cpu whose home - * node associativity levels have changed. - * - * Returns the number of cpus with unhandled associativity changes. - */ -static int update_cpu_associativity_changes_mask(void) -{ - int cpu; - cpumask_t *changes = &cpu_associativity_changes_mask; - - for_each_possible_cpu(cpu) { - int i, changed = 0; - u8 *counts = vphn_cpu_change_counts[cpu]; - volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts; - - for (i = 0; i < distance_ref_points_depth; i++) { - if (hypervisor_counts[i] != counts[i]) { - counts[i] = hypervisor_counts[i]; - changed = 1; - } - } - if (changed) { - cpumask_or(changes, changes, cpu_sibling_mask(cpu)); - cpu = cpu_last_thread_sibling(cpu); - } - } - - return cpumask_weight(changes); -} - /* * Retrieve the new associativity information for a virtual processor's * home node. @@ -1498,16 +1438,6 @@ static void topology_schedule_update(void) schedule_work(&topology_work); } -static void topology_timer_fn(struct timer_list *unused) -{ - if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask)) - topology_schedule_update(); - else if (vphn_enabled) { - if (update_cpu_associativity_changes_mask() > 0) - topology_schedule_update(); - reset_topology_timer(); - } -} static struct timer_list topology_timer; static void reset_topology_timer(void) @@ -1516,69 +1446,12 @@ static void reset_topology_timer(void) mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ); } -#ifdef CONFIG_SMP - -static int dt_update_callback(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct of_reconfig_data *update = data; - int rc = NOTIFY_DONE; - - switch (action) { - case OF_RECONFIG_UPDATE_PROPERTY: - if (of_node_is_type(update->dn, "cpu") && - !of_prop_cmp(update->prop->name, "ibm,associativity")) { - u32 core_id; - of_property_read_u32(update->dn, "reg", &core_id); - rc = dlpar_cpu_readd(core_id); - rc = NOTIFY_OK; - } - break; - } - - return rc; -} - -static struct notifier_block dt_update_nb = { - .notifier_call = dt_update_callback, -}; - -#endif - /* * Start polling for associativity changes. */ int start_topology_update(void) { - int rc = 0; - - if (!topology_updates_enabled) - return 0; - - if (firmware_has_feature(FW_FEATURE_PRRN)) { - if (!prrn_enabled) { - prrn_enabled = 1; -#ifdef CONFIG_SMP - rc = of_reconfig_notifier_register(&dt_update_nb); -#endif - } - } - if (firmware_has_feature(FW_FEATURE_VPHN) && - lppaca_shared_proc(get_lppaca())) { - if (!vphn_enabled) { - vphn_enabled = 1; - setup_cpu_associativity_change_counters(); - timer_setup(&topology_timer, topology_timer_fn, - TIMER_DEFERRABLE); - reset_topology_timer(); - } - } - - pr_info("Starting topology update%s%s\n", - (prrn_enabled ? " prrn_enabled" : ""), - (vphn_enabled ? 
" vphn_enabled" : "")); - - return rc; + return 0; } /* @@ -1586,25 +1459,7 @@ int start_topology_update(void) */ int stop_topology_update(void) { - int rc = 0; - - if (!topology_updates_enabled) - return 0; - - if (prrn_enabled) { - prrn_enabled = 0; -#ifdef CONFIG_SMP - rc = of_reconfig_notifier_unregister(&dt_update_nb); -#endif - } - if (vphn_enabled) { - vphn_enabled = 0; - rc = del_timer_sync(&topology_timer); - } - - pr_info("Stopping topology update\n"); - - return rc; + return 0; } int prrn_is_enabled(void) -- cgit v1.2.3 From e6eacf8eb4dee7bc7021c837666e3ebf1b0ec3b5 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 12 Jun 2020 00:12:25 -0500 Subject: powerpc/numa: make vphn_enabled, prrn_enabled flags const Previous changes have made it so these flags are never changed; enforce this by making them const. Signed-off-by: Nathan Lynch Reviewed-by: Srikar Dronamraju Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612051238.1007764-6-nathanl@linux.ibm.com --- arch/powerpc/mm/numa.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 9e20f12e6caf..1b89bacb8975 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1132,8 +1132,8 @@ struct topology_update_data { #define TOPOLOGY_DEF_TIMER_SECS 60 static cpumask_t cpu_associativity_changes_mask; -static int vphn_enabled; -static int prrn_enabled; +static const int vphn_enabled; +static const int prrn_enabled; static void reset_topology_timer(void); static int topology_timer_secs = 1; static int topology_inited; -- cgit v1.2.3 From 50e0cf3742a01e72f4ea4a8fe9221b152e22871b Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 12 Jun 2020 00:12:26 -0500 Subject: powerpc/numa: remove unreachable topology timer code Since vphn_enabled is always 0, we can stub out timed_topology_update() and remove the code which becomes unreachable. Signed-off-by: Nathan Lynch Reviewed-by: Srikar Dronamraju Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612051238.1007764-7-nathanl@linux.ibm.com --- arch/powerpc/mm/numa.c | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 1b89bacb8975..6207297490a8 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1129,13 +1129,9 @@ struct topology_update_data { int new_nid; }; -#define TOPOLOGY_DEF_TIMER_SECS 60 - static cpumask_t cpu_associativity_changes_mask; static const int vphn_enabled; static const int prrn_enabled; -static void reset_topology_timer(void); -static int topology_timer_secs = 1; static int topology_inited; /* @@ -1143,15 +1139,6 @@ static int topology_inited; */ int timed_topology_update(int nsecs) { - if (vphn_enabled) { - if (nsecs > 0) - topology_timer_secs = nsecs; - else - topology_timer_secs = TOPOLOGY_DEF_TIMER_SECS; - - reset_topology_timer(); - } - return 0; } @@ -1438,14 +1425,6 @@ static void topology_schedule_update(void) schedule_work(&topology_work); } -static struct timer_list topology_timer; - -static void reset_topology_timer(void) -{ - if (vphn_enabled) - mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ); -} - /* * Start polling for associativity changes. 
*/ -- cgit v1.2.3 From 6325cb4a4ea8f4af8515b923650dd8f709694b44 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 12 Jun 2020 00:12:27 -0500 Subject: powerpc/numa: remove unreachable topology workqueue code Since vphn_enabled is always 0, we can remove the call to topology_schedule_update() and remove the code which becomes unreachable as a result. Signed-off-by: Nathan Lynch Reviewed-by: Srikar Dronamraju Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612051238.1007764-8-nathanl@linux.ibm.com --- arch/powerpc/mm/numa.c | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 6207297490a8..8415481a7f13 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1414,17 +1414,6 @@ int arch_update_cpu_topology(void) return numa_update_cpu_topology(true); } -static void topology_work_fn(struct work_struct *work) -{ - rebuild_sched_domains(); -} -static DECLARE_WORK(topology_work, topology_work_fn); - -static void topology_schedule_update(void) -{ - schedule_work(&topology_work); -} - /* * Start polling for associativity changes. */ @@ -1450,9 +1439,6 @@ static int topology_update_init(void) { start_topology_update(); - if (vphn_enabled) - topology_schedule_update(); - topology_inited = 1; return 0; } -- cgit v1.2.3 From 9fb8b5fd1bf782a8257506ad5198237f4124d556 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 12 Jun 2020 00:12:28 -0500 Subject: powerpc/numa: remove vphn_enabled and prrn_enabled internal flags These flags are always zero now; remove them and suitably adjust the remaining references to them. Signed-off-by: Nathan Lynch Reviewed-by: Srikar Dronamraju Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612051238.1007764-9-nathanl@linux.ibm.com --- arch/powerpc/mm/numa.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 8415481a7f13..8749d7f2b1a6 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1130,8 +1130,6 @@ struct topology_update_data { }; static cpumask_t cpu_associativity_changes_mask; -static const int vphn_enabled; -static const int prrn_enabled; static int topology_inited; /* @@ -1292,7 +1290,7 @@ int numa_update_cpu_topology(bool cpus_locked) struct device *dev; int weight, new_nid, i = 0; - if (!prrn_enabled && !vphn_enabled && topology_inited) + if (topology_inited) return 0; weight = cpumask_weight(&cpu_associativity_changes_mask); @@ -1432,7 +1430,7 @@ int stop_topology_update(void) int prrn_is_enabled(void) { - return prrn_enabled; + return 0; } static int topology_update_init(void) -- cgit v1.2.3 From 893ec6461f46c91487d914e6d467d2e804b9a883 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 12 Jun 2020 00:12:29 -0500 Subject: powerpc/numa: stub out numa_update_cpu_topology() Previous changes have removed the code which sets bits in cpu_associativity_changes_mask and thus it is never modifed at runtime. From this we can reason that numa_update_cpu_topology() always returns 0 without doing anything. Remove the body of numa_update_cpu_topology() and remove all code which becomes unreachable as a result. 
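To make the reasoning explicit: with cpu_associativity_changes_mask never populated, the early exit at the top of the function (quoted from the removal below) always fires, so the remainder of the body can never run:

	weight = cpumask_weight(&cpu_associativity_changes_mask);
	if (!weight)
		return 0;
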
Signed-off-by: Nathan Lynch Reviewed-by: Srikar Dronamraju Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612051238.1007764-10-nathanl@linux.ibm.com --- arch/powerpc/mm/numa.c | 193 +------------------------------------------------ 1 file changed, 1 insertion(+), 192 deletions(-) diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 8749d7f2b1a6..b220e5b60140 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1122,14 +1122,6 @@ u64 memory_hotplug_max(void) /* Virtual Processor Home Node (VPHN) support */ #ifdef CONFIG_PPC_SPLPAR -struct topology_update_data { - struct topology_update_data *next; - unsigned int cpu; - int old_nid; - int new_nid; -}; - -static cpumask_t cpu_associativity_changes_mask; static int topology_inited; /* @@ -1219,192 +1211,9 @@ int find_and_online_cpu_nid(int cpu) return new_nid; } -/* - * Update the CPU maps and sysfs entries for a single CPU when its NUMA - * characteristics change. This function doesn't perform any locking and is - * only safe to call from stop_machine(). - */ -static int update_cpu_topology(void *data) -{ - struct topology_update_data *update; - unsigned long cpu; - - if (!data) - return -EINVAL; - - cpu = smp_processor_id(); - - for (update = data; update; update = update->next) { - int new_nid = update->new_nid; - if (cpu != update->cpu) - continue; - - unmap_cpu_from_node(cpu); - map_cpu_to_node(cpu, new_nid); - set_cpu_numa_node(cpu, new_nid); - set_cpu_numa_mem(cpu, local_memory_node(new_nid)); - vdso_getcpu_init(); - } - - return 0; -} - -static int update_lookup_table(void *data) -{ - struct topology_update_data *update; - - if (!data) - return -EINVAL; - - /* - * Upon topology update, the numa-cpu lookup table needs to be updated - * for all threads in the core, including offline CPUs, to ensure that - * future hotplug operations respect the cpu-to-node associativity - * properly. - */ - for (update = data; update; update = update->next) { - int nid, base, j; - - nid = update->new_nid; - base = cpu_first_thread_sibling(update->cpu); - - for (j = 0; j < threads_per_core; j++) { - update_numa_cpu_lookup_table(base + j, nid); - } - } - - return 0; -} - -/* - * Update the node maps and sysfs entries for each cpu whose home node - * has changed. Returns 1 when the topology has changed, and 0 otherwise. - * - * cpus_locked says whether we already hold cpu_hotplug_lock. - */ int numa_update_cpu_topology(bool cpus_locked) { - unsigned int cpu, sibling, changed = 0; - struct topology_update_data *updates, *ud; - cpumask_t updated_cpus; - struct device *dev; - int weight, new_nid, i = 0; - - if (topology_inited) - return 0; - - weight = cpumask_weight(&cpu_associativity_changes_mask); - if (!weight) - return 0; - - updates = kcalloc(weight, sizeof(*updates), GFP_KERNEL); - if (!updates) - return 0; - - cpumask_clear(&updated_cpus); - - for_each_cpu(cpu, &cpu_associativity_changes_mask) { - /* - * If siblings aren't flagged for changes, updates list - * will be too short. Skip on this update and set for next - * update. 
- */ - if (!cpumask_subset(cpu_sibling_mask(cpu), - &cpu_associativity_changes_mask)) { - pr_info("Sibling bits not set for associativity " - "change, cpu%d\n", cpu); - cpumask_or(&cpu_associativity_changes_mask, - &cpu_associativity_changes_mask, - cpu_sibling_mask(cpu)); - cpu = cpu_last_thread_sibling(cpu); - continue; - } - - new_nid = find_and_online_cpu_nid(cpu); - - if (new_nid == numa_cpu_lookup_table[cpu]) { - cpumask_andnot(&cpu_associativity_changes_mask, - &cpu_associativity_changes_mask, - cpu_sibling_mask(cpu)); - dbg("Assoc chg gives same node %d for cpu%d\n", - new_nid, cpu); - cpu = cpu_last_thread_sibling(cpu); - continue; - } - - for_each_cpu(sibling, cpu_sibling_mask(cpu)) { - ud = &updates[i++]; - ud->next = &updates[i]; - ud->cpu = sibling; - ud->new_nid = new_nid; - ud->old_nid = numa_cpu_lookup_table[sibling]; - cpumask_set_cpu(sibling, &updated_cpus); - } - cpu = cpu_last_thread_sibling(cpu); - } - - /* - * Prevent processing of 'updates' from overflowing array - * where last entry filled in a 'next' pointer. - */ - if (i) - updates[i-1].next = NULL; - - pr_debug("Topology update for the following CPUs:\n"); - if (cpumask_weight(&updated_cpus)) { - for (ud = &updates[0]; ud; ud = ud->next) { - pr_debug("cpu %d moving from node %d " - "to %d\n", ud->cpu, - ud->old_nid, ud->new_nid); - } - } - - /* - * In cases where we have nothing to update (because the updates list - * is too short or because the new topology is same as the old one), - * skip invoking update_cpu_topology() via stop-machine(). This is - * necessary (and not just a fast-path optimization) since stop-machine - * can end up electing a random CPU to run update_cpu_topology(), and - * thus trick us into setting up incorrect cpu-node mappings (since - * 'updates' is kzalloc()'ed). - * - * And for the similar reason, we will skip all the following updating. - */ - if (!cpumask_weight(&updated_cpus)) - goto out; - - if (cpus_locked) - stop_machine_cpuslocked(update_cpu_topology, &updates[0], - &updated_cpus); - else - stop_machine(update_cpu_topology, &updates[0], &updated_cpus); - - /* - * Update the numa-cpu lookup table with the new mappings, even for - * offline CPUs. It is best to perform this update from the stop- - * machine context. - */ - if (cpus_locked) - stop_machine_cpuslocked(update_lookup_table, &updates[0], - cpumask_of(raw_smp_processor_id())); - else - stop_machine(update_lookup_table, &updates[0], - cpumask_of(raw_smp_processor_id())); - - for (ud = &updates[0]; ud; ud = ud->next) { - unregister_cpu_under_node(ud->cpu, ud->old_nid); - register_cpu_under_node(ud->cpu, ud->new_nid); - - dev = get_cpu_device(ud->cpu); - if (dev) - kobject_uevent(&dev->kobj, KOBJ_CHANGE); - cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask); - changed = 1; - } - -out: - kfree(updates); - return changed; + return 0; } int arch_update_cpu_topology(void) -- cgit v1.2.3 From b1815aeac7fde2dc3412daf2efaededd21cd58e0 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 12 Jun 2020 00:12:30 -0500 Subject: powerpc/numa: remove timed_topology_update() timed_topology_update is a no-op now, so remove it and all call sites. 
Signed-off-by: Nathan Lynch Reviewed-by: Srikar Dronamraju Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612051238.1007764-11-nathanl@linux.ibm.com --- arch/powerpc/include/asm/topology.h | 5 ----- arch/powerpc/mm/numa.c | 9 --------- arch/powerpc/platforms/pseries/hotplug-cpu.c | 2 -- 3 files changed, 16 deletions(-) diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 2db7ba789720..379e2cc3789f 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -97,7 +97,6 @@ extern int start_topology_update(void); extern int stop_topology_update(void); extern int prrn_is_enabled(void); extern int find_and_online_cpu_nid(int cpu); -extern int timed_topology_update(int nsecs); #else static inline int start_topology_update(void) { @@ -115,10 +114,6 @@ static inline int find_and_online_cpu_nid(int cpu) { return 0; } -static inline int timed_topology_update(int nsecs) -{ - return 0; -} #endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */ diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index b220e5b60140..6c579ac3e679 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1124,14 +1124,6 @@ u64 memory_hotplug_max(void) #ifdef CONFIG_PPC_SPLPAR static int topology_inited; -/* - * Change polling interval for associativity changes. - */ -int timed_topology_update(int nsecs) -{ - return 0; -} - /* * Retrieve the new associativity information for a virtual processor's * home node. @@ -1147,7 +1139,6 @@ static long vphn_get_associativity(unsigned long cpu, switch (rc) { case H_SUCCESS: dbg("VPHN hcall succeeded. Reset polling...\n"); - timed_topology_update(0); goto out; case H_FUNCTION: diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index d4b346355bb9..dbfabb185eb5 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -263,7 +263,6 @@ static int dlpar_offline_cpu(struct device_node *dn) break; cpu_maps_update_done(); - timed_topology_update(1); rc = device_offline(get_cpu_device(cpu)); if (rc) goto out; @@ -302,7 +301,6 @@ static int dlpar_online_cpu(struct device_node *dn) if (get_hard_smp_processor_id(cpu) != thread) continue; cpu_maps_update_done(); - timed_topology_update(1); find_and_online_cpu_nid(cpu); rc = device_online(get_cpu_device(cpu)); if (rc) { -- cgit v1.2.3 From 1835303e5690cbeef2c07a9a5416045475ddaa13 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 12 Jun 2020 00:12:31 -0500 Subject: powerpc/numa: remove start/stop_topology_update() These APIs have become no-ops, so remove them and all call sites. 
Signed-off-by: Nathan Lynch Reviewed-by: Srikar Dronamraju Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612051238.1007764-12-nathanl@linux.ibm.com --- arch/powerpc/include/asm/topology.h | 10 ---------- arch/powerpc/mm/numa.c | 20 -------------------- arch/powerpc/platforms/pseries/mobility.c | 4 ---- arch/powerpc/platforms/pseries/suspend.c | 5 +---- 4 files changed, 1 insertion(+), 38 deletions(-) diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 379e2cc3789f..537c638582eb 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -93,19 +93,9 @@ static inline int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc) #endif /* CONFIG_NUMA */ #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR) -extern int start_topology_update(void); -extern int stop_topology_update(void); extern int prrn_is_enabled(void); extern int find_and_online_cpu_nid(int cpu); #else -static inline int start_topology_update(void) -{ - return 0; -} -static inline int stop_topology_update(void) -{ - return 0; -} static inline int prrn_is_enabled(void) { return 0; diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 6c579ac3e679..dec7ce3b5e67 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1157,8 +1157,6 @@ static long vphn_get_associativity(unsigned long cpu, , rc); break; } - - stop_topology_update(); out: return rc; } @@ -1212,22 +1210,6 @@ int arch_update_cpu_topology(void) return numa_update_cpu_topology(true); } -/* - * Start polling for associativity changes. - */ -int start_topology_update(void) -{ - return 0; -} - -/* - * Disable polling for VPHN associativity changes. - */ -int stop_topology_update(void) -{ - return 0; -} - int prrn_is_enabled(void) { return 0; @@ -1235,8 +1217,6 @@ int prrn_is_enabled(void) static int topology_update_init(void) { - start_topology_update(); - topology_inited = 1; return 0; } diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 10d982997736..c0b09f6f0ae3 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -388,8 +388,6 @@ static ssize_t migration_store(struct class *class, if (rc) return rc; - stop_topology_update(); - do { rc = rtas_ibm_suspend_me(streamid); if (rc == -EAGAIN) @@ -401,8 +399,6 @@ static ssize_t migration_store(struct class *class, post_mobility_fixup(); - start_topology_update(); - return count; } diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c index f789693f61f4..81e0ac58d620 100644 --- a/arch/powerpc/platforms/pseries/suspend.c +++ b/arch/powerpc/platforms/pseries/suspend.c @@ -145,11 +145,8 @@ static ssize_t store_hibernate(struct device *dev, ssleep(1); } while (rc == -EAGAIN); - if (!rc) { - stop_topology_update(); + if (!rc) rc = pm_suspend(PM_SUSPEND_MEM); - start_topology_update(); - } stream_id = 0; -- cgit v1.2.3 From 91713ac377859893a7798999cb2e3a388d8ae710 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 12 Jun 2020 00:12:32 -0500 Subject: powerpc/rtasd: simplify handle_rtas_event(), emit message on events prrn_is_enabled() always returns false/0, so handle_rtas_event() can be simplified and some dead code can be removed. Use machine_is() instead of #ifdef to run this code only on pseries, and add an informational ratelimited message that we are ignoring the events. 
PRRN events are relatively rare in normal operation and usually arise from operator-initiated actions such as a DPO (Dynamic Platform Optimizer) run. Eventually we do want to consume these events and update the device tree, but that needs more care to be safe vs LPM and DLPAR. Signed-off-by: Nathan Lynch Reviewed-by: Srikar Dronamraju Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612051238.1007764-13-nathanl@linux.ibm.com --- arch/powerpc/kernel/rtasd.c | 28 +++------------------------- 1 file changed, 3 insertions(+), 25 deletions(-) diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c index 89b798f8f656..8561dfb33f24 100644 --- a/arch/powerpc/kernel/rtasd.c +++ b/arch/powerpc/kernel/rtasd.c @@ -273,37 +273,15 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal) } } -#ifdef CONFIG_PPC_PSERIES -static void handle_prrn_event(s32 scope) -{ - /* - * For PRRN, we must pass the negative of the scope value in - * the RTAS event. - */ - pseries_devicetree_update(-scope); - numa_update_cpu_topology(false); -} - static void handle_rtas_event(const struct rtas_error_log *log) { - if (rtas_error_type(log) != RTAS_TYPE_PRRN || !prrn_is_enabled()) + if (!machine_is(pseries)) return; - /* For PRRN Events the extended log length is used to denote - * the scope for calling rtas update-nodes. - */ - handle_prrn_event(rtas_error_extended_log_length(log)); + if (rtas_error_type(log) == RTAS_TYPE_PRRN) + pr_info_ratelimited("Platform resource reassignment ignored.\n"); } -#else - -static void handle_rtas_event(const struct rtas_error_log *log) -{ - return; -} - -#endif - static int rtas_log_open(struct inode * inode, struct file * file) { return 0; -- cgit v1.2.3 From 042ef7cc43f4571d8cbe44a7c735ab6622809142 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 12 Jun 2020 00:12:33 -0500 Subject: powerpc/numa: remove prrn_is_enabled() All users of this prrn_is_enabled() are gone; remove it. Signed-off-by: Nathan Lynch Reviewed-by: Srikar Dronamraju Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612051238.1007764-14-nathanl@linux.ibm.com --- arch/powerpc/include/asm/topology.h | 5 ----- arch/powerpc/mm/numa.c | 5 ----- 2 files changed, 10 deletions(-) diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 537c638582eb..658aad65912b 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -93,13 +93,8 @@ static inline int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc) #endif /* CONFIG_NUMA */ #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR) -extern int prrn_is_enabled(void); extern int find_and_online_cpu_nid(int cpu); #else -static inline int prrn_is_enabled(void) -{ - return 0; -} static inline int find_and_online_cpu_nid(int cpu) { return 0; diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index dec7ce3b5e67..26fcc947dd2d 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1210,11 +1210,6 @@ int arch_update_cpu_topology(void) return numa_update_cpu_topology(true); } -int prrn_is_enabled(void) -{ - return 0; -} - static int topology_update_init(void) { topology_inited = 1; -- cgit v1.2.3 From cdf082c4570f186d608aca688f2cc872b014558a Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 12 Jun 2020 00:12:34 -0500 Subject: powerpc/numa: remove arch_update_cpu_topology Since arch_update_cpu_topology() doesn't do anything on powerpc now, remove it and associated dead code. 
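For context, arch_update_cpu_topology() has a __weak default provided by the generic scheduler code, so dropping the powerpc override simply lets callers such as partition_sched_domains() fall back to it. A sketch of that default (assuming the generic definition, not part of this patch):

	int __weak arch_update_cpu_topology(void)
	{
		return 0;
	}
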
Signed-off-by: Nathan Lynch Reviewed-by: Srikar Dronamraju Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612051238.1007764-15-nathanl@linux.ibm.com --- arch/powerpc/include/asm/topology.h | 6 ------ arch/powerpc/mm/numa.c | 10 ---------- 2 files changed, 16 deletions(-) diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 658aad65912b..b2c346c5e16f 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -43,7 +43,6 @@ extern void __init dump_numa_cpu_topology(void); extern int sysfs_add_device_to_node(struct device *dev, int nid); extern void sysfs_remove_device_from_node(struct device *dev, int nid); -extern int numa_update_cpu_topology(bool cpus_locked); static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) { @@ -78,11 +77,6 @@ static inline void sysfs_remove_device_from_node(struct device *dev, { } -static inline int numa_update_cpu_topology(bool cpus_locked) -{ - return 0; -} - static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {} static inline int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc) diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 26fcc947dd2d..e437a9ac4956 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1200,16 +1200,6 @@ int find_and_online_cpu_nid(int cpu) return new_nid; } -int numa_update_cpu_topology(bool cpus_locked) -{ - return 0; -} - -int arch_update_cpu_topology(void) -{ - return numa_update_cpu_topology(true); -} - static int topology_update_init(void) { topology_inited = 1; -- cgit v1.2.3 From bb7c3d36e3b18aa02d34358ae75e1b91f69a968b Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 12 Jun 2020 00:12:35 -0500 Subject: powerpc/pseries: remove prrn special case from DT update path pseries_devicetree_update() is no longer called with PRRN_SCOPE. The purpose of prrn_update_node() was to remove and then add back a LMB whose NUMA assignment had changed. This has never been reliable, and this codepath has been default-disabled for several releases. Remove prrn_update_node(). Signed-off-by: Nathan Lynch Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612051238.1007764-16-nathanl@linux.ibm.com --- arch/powerpc/platforms/pseries/mobility.c | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index c0b09f6f0ae3..78cd772a579b 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -244,29 +244,6 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index) return rc; } -static void prrn_update_node(__be32 phandle) -{ - struct pseries_hp_errorlog hp_elog; - struct device_node *dn; - - /* - * If a node is found from a the given phandle, the phandle does not - * represent the drc index of an LMB and we can ignore. 
- */ - dn = of_find_node_by_phandle(be32_to_cpu(phandle)); - if (dn) { - of_node_put(dn); - return; - } - - hp_elog.resource = PSERIES_HP_ELOG_RESOURCE_MEM; - hp_elog.action = PSERIES_HP_ELOG_ACTION_READD; - hp_elog.id_type = PSERIES_HP_ELOG_ID_DRC_INDEX; - hp_elog._drc_u.drc_index = phandle; - - handle_dlpar_errorlog(&hp_elog); -} - int pseries_devicetree_update(s32 scope) { char *rtas_buf; @@ -305,10 +282,6 @@ int pseries_devicetree_update(s32 scope) break; case UPDATE_DT_NODE: update_dt_node(phandle, scope); - - if (scope == PRRN_SCOPE) - prrn_update_node(phandle); - break; case ADD_DT_NODE: drc_index = *data++; -- cgit v1.2.3 From 4abe60c6448bf1dba48689450ad1348e5fc6f7b7 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 12 Jun 2020 00:12:36 -0500 Subject: powerpc/pseries: remove memory "re-add" implementation dlpar_memory() no longer has any callers which pass PSERIES_HP_ELOG_ACTION_READD. Remove this case and the corresponding unreachable code. Signed-off-by: Nathan Lynch Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612051238.1007764-17-nathanl@linux.ibm.com --- arch/powerpc/include/asm/rtas.h | 1 - arch/powerpc/platforms/pseries/hotplug-memory.c | 42 ------------------------- 2 files changed, 43 deletions(-) diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 0107d724e9da..55f9a154c95d 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -215,7 +215,6 @@ inline uint16_t pseries_errorlog_length(struct pseries_errorlog *sect) #define PSERIES_HP_ELOG_ACTION_ADD 1 #define PSERIES_HP_ELOG_ACTION_REMOVE 2 -#define PSERIES_HP_ELOG_ACTION_READD 3 #define PSERIES_HP_ELOG_ID_DRC_NAME 1 #define PSERIES_HP_ELOG_ID_DRC_INDEX 2 diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 5ace2f9a277e..67ece3ac9ac2 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -487,40 +487,6 @@ static int dlpar_memory_remove_by_index(u32 drc_index) return rc; } -static int dlpar_memory_readd_by_index(u32 drc_index) -{ - struct drmem_lmb *lmb; - int lmb_found; - int rc; - - pr_info("Attempting to update LMB, drc index %x\n", drc_index); - - lmb_found = 0; - for_each_drmem_lmb(lmb) { - if (lmb->drc_index == drc_index) { - lmb_found = 1; - rc = dlpar_remove_lmb(lmb); - if (!rc) { - rc = dlpar_add_lmb(lmb); - if (rc) - dlpar_release_drc(lmb->drc_index); - } - break; - } - } - - if (!lmb_found) - rc = -EINVAL; - - if (rc) - pr_info("Failed to update memory at %llx\n", - lmb->base_addr); - else - pr_info("Memory at %llx was updated\n", lmb->base_addr); - - return rc; -} - static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index) { struct drmem_lmb *lmb, *start_lmb, *end_lmb; @@ -617,10 +583,6 @@ static int dlpar_memory_remove_by_index(u32 drc_index) { return -EOPNOTSUPP; } -static int dlpar_memory_readd_by_index(u32 drc_index) -{ - return -EOPNOTSUPP; -} static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index) { @@ -902,10 +864,6 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) break; } - break; - case PSERIES_HP_ELOG_ACTION_READD: - drc_index = hp_elog->_drc_u.drc_index; - rc = dlpar_memory_readd_by_index(drc_index); break; default: pr_err("Invalid action (%d) specified\n", hp_elog->action); -- cgit v1.2.3 From 38c392cef19019457ddcfb197ff3d9c5267698e6 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 12 Jun 2020 00:12:37 -0500 Subject: powerpc/pseries: remove 
dlpar_cpu_readd() dlpar_cpu_readd() is unused now. Signed-off-by: Nathan Lynch Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612051238.1007764-18-nathanl@linux.ibm.com --- arch/powerpc/include/asm/topology.h | 1 - arch/powerpc/platforms/pseries/hotplug-cpu.c | 19 ------------------- 2 files changed, 20 deletions(-) diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index b2c346c5e16f..f0b6300e7dd3 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -115,7 +115,6 @@ int get_physical_package_id(int cpu); #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) #define topology_core_id(cpu) (cpu_to_core_id(cpu)) -int dlpar_cpu_readd(int cpu); #endif #endif diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index dbfabb185eb5..4bad7a83addc 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -779,25 +779,6 @@ static int dlpar_cpu_add_by_count(u32 cpus_to_add) return rc; } -int dlpar_cpu_readd(int cpu) -{ - struct device_node *dn; - struct device *dev; - u32 drc_index; - int rc; - - dev = get_cpu_device(cpu); - dn = dev->of_node; - - rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index); - - rc = dlpar_cpu_remove_by_index(drc_index); - if (!rc) - rc = dlpar_cpu_add(drc_index); - - return rc; -} - int dlpar_cpu(struct pseries_hp_errorlog *hp_elog) { u32 count, drc_index; -- cgit v1.2.3 From e978a3ccaa714b5ff125857d2cbecbb6fdf6c094 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 12 Jun 2020 00:12:38 -0500 Subject: powerpc/pseries: remove obsolete memory hotplug DT notifier code pseries_update_drconf_memory() runs from a DT notifier in response to an update to the ibm,dynamic-memory property of the /ibm,dynamic-reconfiguration-memory node. This property is an older less compact format than the ibm,dynamic-memory-v2 property used in most currently supported firmwares. There has never been an equivalent function for the v2 property. pseries_update_drconf_memory() compares the 'assigned' flag for each LMB in the old vs new properties and adds or removes the block accordingly. However it appears to be of no actual utility: * Partition suspension and PRRNs are specified only to change LMBs' NUMA affinity information. This notifier should be a no-op for those scenarios since the assigned flags should not change. * The memory hotplug/DLPAR path has a hack which short-circuits execution of the notifier: dlpar_memory() ... rtas_hp_event = true; drmem_update_dt() of_update_property() pseries_memory_notifier() pseries_update_drconf_memory() if (rtas_hp_event) return; So this code only makes sense as a relic of the time when more of the DLPAR workflow took place in user space. I don't see a purpose for it now. 
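Laid out as a call tree, the short-circuit described above is:

	dlpar_memory()
		...
		rtas_hp_event = true;
		drmem_update_dt()
			of_update_property()
				pseries_memory_notifier()
					pseries_update_drconf_memory()
						if (rtas_hp_event)
							return;
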
Signed-off-by: Nathan Lynch Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612051238.1007764-19-nathanl@linux.ibm.com --- arch/powerpc/platforms/pseries/hotplug-memory.c | 65 +------------------------ 1 file changed, 1 insertion(+), 64 deletions(-) diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 67ece3ac9ac2..73a5dcd977e1 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -22,8 +22,6 @@ #include #include "pseries.h" -static bool rtas_hp_event; - unsigned long pseries_memory_block_size(void) { struct device_node *np; @@ -871,11 +869,8 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) break; } - if (!rc) { - rtas_hp_event = true; + if (!rc) rc = drmem_update_dt(); - rtas_hp_event = false; - } unlock_device_hotplug(); return rc; @@ -911,60 +906,6 @@ static int pseries_add_mem_node(struct device_node *np) return (ret < 0) ? -EINVAL : 0; } -static int pseries_update_drconf_memory(struct of_reconfig_data *pr) -{ - struct of_drconf_cell_v1 *new_drmem, *old_drmem; - unsigned long memblock_size; - u32 entries; - __be32 *p; - int i, rc = -EINVAL; - - if (rtas_hp_event) - return 0; - - memblock_size = pseries_memory_block_size(); - if (!memblock_size) - return -EINVAL; - - if (!pr->old_prop) - return 0; - - p = (__be32 *) pr->old_prop->value; - if (!p) - return -EINVAL; - - /* The first int of the property is the number of lmb's described - * by the property. This is followed by an array of of_drconf_cell - * entries. Get the number of entries and skip to the array of - * of_drconf_cell's. - */ - entries = be32_to_cpu(*p++); - old_drmem = (struct of_drconf_cell_v1 *)p; - - p = (__be32 *)pr->prop->value; - p++; - new_drmem = (struct of_drconf_cell_v1 *)p; - - for (i = 0; i < entries; i++) { - if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) && - (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) { - rc = pseries_remove_memblock( - be64_to_cpu(old_drmem[i].base_addr), - memblock_size); - break; - } else if ((!(be32_to_cpu(old_drmem[i].flags) & - DRCONF_MEM_ASSIGNED)) && - (be32_to_cpu(new_drmem[i].flags) & - DRCONF_MEM_ASSIGNED)) { - rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr), - memblock_size); - rc = (rc < 0) ? -EINVAL : 0; - break; - } - } - return rc; -} - static int pseries_memory_notifier(struct notifier_block *nb, unsigned long action, void *data) { @@ -978,10 +919,6 @@ static int pseries_memory_notifier(struct notifier_block *nb, case OF_RECONFIG_DETACH_NODE: err = pseries_remove_mem_node(rd->dn); break; - case OF_RECONFIG_UPDATE_PROPERTY: - if (!strcmp(rd->prop->name, "ibm,dynamic-memory")) - err = pseries_update_drconf_memory(rd); - break; } return notifier_from_errno(err); } -- cgit v1.2.3 From 1a8f0886a6008c98a926bdeca49f2ef33015a491 Mon Sep 17 00:00:00 2001 From: Kajol Jain Date: Thu, 9 Jul 2020 10:48:35 +0530 Subject: powerpc/perf/hv-24x7: Add cpu hotplug support Patch here adds cpu hotplug functions to hv_24x7 pmu. A new cpuhp_state "CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE" enum is added. The online callback function updates the cpumask only if its empty. As the primary intention of adding hotplug support is to designate a CPU to make HCALL to collect the counter data. The offline function test and clear corresponding cpu in a cpumask and update cpumask to any other active cpu. Signed-off-by: Kajol Jain Reviewed-by: Gautham R. 
Shenoy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709051836.723765-2-kjain@linux.ibm.com --- arch/powerpc/perf/hv-24x7.c | 46 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/cpuhotplug.h | 1 + 2 files changed, 47 insertions(+) diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index db213eb7cb02..93b4700dcf8c 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -31,6 +31,8 @@ static int interface_version; /* Whether we have to aggregate result data for some domains. */ static bool aggregate_result_elements; +static cpumask_t hv_24x7_cpumask; + static bool domain_is_valid(unsigned domain) { switch (domain) { @@ -1641,6 +1643,45 @@ static struct pmu h_24x7_pmu = { .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; +static int ppc_hv_24x7_cpu_online(unsigned int cpu) +{ + if (cpumask_empty(&hv_24x7_cpumask)) + cpumask_set_cpu(cpu, &hv_24x7_cpumask); + + return 0; +} + +static int ppc_hv_24x7_cpu_offline(unsigned int cpu) +{ + int target; + + /* Check if exiting cpu is used for collecting 24x7 events */ + if (!cpumask_test_and_clear_cpu(cpu, &hv_24x7_cpumask)) + return 0; + + /* Find a new cpu to collect 24x7 events */ + target = cpumask_last(cpu_active_mask); + + if (target < 0 || target >= nr_cpu_ids) { + pr_err("hv_24x7: CPU hotplug init failed\n"); + return -1; + } + + /* Migrate 24x7 events to the new target */ + cpumask_set_cpu(target, &hv_24x7_cpumask); + perf_pmu_migrate_context(&h_24x7_pmu, cpu, target); + + return 0; +} + +static int hv_24x7_cpu_hotplug_init(void) +{ + return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE, + "perf/powerpc/hv_24x7:online", + ppc_hv_24x7_cpu_online, + ppc_hv_24x7_cpu_offline); +} + static int hv_24x7_init(void) { int r; @@ -1685,6 +1726,11 @@ static int hv_24x7_init(void) if (r) return r; + /* init cpuhotplug */ + r = hv_24x7_cpu_hotplug_init(); + if (r) + return r; + r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1); if (r) return r; diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 191772d4a4d7..a2710e654b64 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -181,6 +181,7 @@ enum cpuhp_state { CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE, + CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE, CPUHP_AP_WATCHDOG_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, CPUHP_AP_RCUTREE_ONLINE, -- cgit v1.2.3 From 792f73f747b82f6cb191a323e1f5755d33149b50 Mon Sep 17 00:00:00 2001 From: Kajol Jain Date: Thu, 9 Jul 2020 10:48:36 +0530 Subject: powerpc/hv-24x7: Add sysfs files inside hv-24x7 device to show cpumask Patch here adds a cpumask attr to hv_24x7 pmu along with ABI documentation. Primary use to expose the cpumask is for the perf tool which has the capability to parse the driver sysfs folder and understand the cpumask file. Having cpumask file will reduce the number of perf command line parameters (will avoid "-C" option in the perf tool command line). It can also notify the user which is the current cpu used to retrieve the counter data. 
command:# cat /sys/devices/hv_24x7/interface/cpumask 0 Signed-off-by: Kajol Jain Reviewed-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709051836.723765-3-kjain@linux.ibm.com --- Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 | 7 +++++++ arch/powerpc/perf/hv-24x7.c | 8 ++++++++ 2 files changed, 15 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 b/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 index e8698afcd952..f7e32f218f73 100644 --- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 +++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 @@ -43,6 +43,13 @@ Description: read only This sysfs interface exposes the number of cores per chip present in the system. +What: /sys/devices/hv_24x7/interface/cpumask +Date: July 2020 +Contact: Linux on PowerPC Developer List +Description: read only + This sysfs file exposes the cpumask which is designated to make + HCALLs to retrieve hv-24x7 pmu event counter data. + What: /sys/bus/event_source/devices/hv_24x7/event_descs/ Date: February 2014 Contact: Linux on PowerPC Developer List diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 93b4700dcf8c..cdb7bfbd157e 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -448,6 +448,12 @@ static ssize_t device_show_string(struct device *dev, return sprintf(buf, "%s\n", (char *)d->var); } +static ssize_t cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return cpumap_print_to_pagebuf(true, buf, &hv_24x7_cpumask); +} + static ssize_t sockets_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1115,6 +1121,7 @@ static DEVICE_ATTR_RO(domains); static DEVICE_ATTR_RO(sockets); static DEVICE_ATTR_RO(chipspersocket); static DEVICE_ATTR_RO(coresperchip); +static DEVICE_ATTR_RO(cpumask); static struct bin_attribute *if_bin_attrs[] = { &bin_attr_catalog, @@ -1128,6 +1135,7 @@ static struct attribute *if_attrs[] = { &dev_attr_sockets.attr, &dev_attr_chipspersocket.attr, &dev_attr_coresperchip.attr, + &dev_attr_cpumask.attr, NULL, }; -- cgit v1.2.3 From db551f8cc6a33f79cd2d2a6cfd1903f044e828a8 Mon Sep 17 00:00:00 2001 From: Balamuruhan S Date: Wed, 24 Jun 2020 17:00:33 +0530 Subject: powerpc/ppc-opcode: Introduce PPC_RAW_* macros for base instruction encoding Introduce PPC_RAW_* macros to have all the bare encoding of ppc instructions. Move `VSX_XX*()` and `TMRN()` macros up to reuse it. Signed-off-by: Balamuruhan S Tested-by: Naveen N. Rao Acked-by: Naveen N. 
Rao [mpe: Add DCBFPS, DCBSTPS, PHWSYNC, PLWSYNC] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200624113038.908074-2-bala24@linux.ibm.com --- arch/powerpc/include/asm/ppc-opcode.h | 93 ++++++++++++++++++++++++++++++++--- 1 file changed, 86 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 3ee6fad902bc..4aac2165da21 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -437,6 +437,92 @@ #define __PPC_EH(eh) 0 #endif +/* Base instruction encoding */ +#define PPC_RAW_CP_ABORT (PPC_INST_CP_ABORT) +#define PPC_RAW_COPY(a, b) (PPC_INST_COPY | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_DARN(t, l) (PPC_INST_DARN | ___PPC_RT(t) | (((l) & 0x3) << 16)) +#define PPC_RAW_DCBAL(a, b) (PPC_INST_DCBAL | __PPC_RA(a) | __PPC_RB(b)) +#define PPC_RAW_DCBZL(a, b) (PPC_INST_DCBZL | __PPC_RA(a) | __PPC_RB(b)) +#define PPC_RAW_LQARX(t, a, b, eh) (PPC_INST_LQARX | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | __PPC_EH(eh)) +#define PPC_RAW_LDARX(t, a, b, eh) (PPC_INST_LDARX | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | __PPC_EH(eh)) +#define PPC_RAW_LWARX(t, a, b, eh) (PPC_INST_LWARX | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | __PPC_EH(eh)) +#define PPC_RAW_PHWSYNC (PPC_INST_PHWSYNC) +#define PPC_RAW_PLWSYNC (PPC_INST_PLWSYNC) +#define PPC_RAW_STQCX(t, a, b) (PPC_INST_STQCX | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_MADDHD(t, a, b, c) (PPC_INST_MADDHD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | ___PPC_RC(c)) +#define PPC_RAW_MADDHDU(t, a, b, c) (PPC_INST_MADDHDU | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | ___PPC_RC(c)) +#define PPC_RAW_MADDLD(t, a, b, c) (PPC_INST_MADDLD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | ___PPC_RC(c)) +#define PPC_RAW_MSGSND(b) (PPC_INST_MSGSND | ___PPC_RB(b)) +#define PPC_RAW_MSGSYNC (PPC_INST_MSGSYNC) +#define PPC_RAW_MSGCLR(b) (PPC_INST_MSGCLR | ___PPC_RB(b)) +#define PPC_RAW_MSGSNDP(b) (PPC_INST_MSGSNDP | ___PPC_RB(b)) +#define PPC_RAW_MSGCLRP(b) (PPC_INST_MSGCLRP | ___PPC_RB(b)) +#define PPC_RAW_PASTE(a, b) (PPC_INST_PASTE | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_POPCNTB(a, s) (PPC_INST_POPCNTB | __PPC_RA(a) | __PPC_RS(s)) +#define PPC_RAW_POPCNTD(a, s) (PPC_INST_POPCNTD | __PPC_RA(a) | __PPC_RS(s)) +#define PPC_RAW_POPCNTW(a, s) (PPC_INST_POPCNTW | __PPC_RA(a) | __PPC_RS(s)) +#define PPC_RAW_RFCI (PPC_INST_RFCI) +#define PPC_RAW_RFDI (PPC_INST_RFDI) +#define PPC_RAW_RFMCI (PPC_INST_RFMCI) +#define PPC_RAW_TLBILX(t, a, b) (PPC_INST_TLBILX | __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b)) +#define PPC_RAW_WAIT(w) (PPC_INST_WAIT | __PPC_WC(w)) +#define PPC_RAW_TLBIE(lp, a) (PPC_INST_TLBIE | ___PPC_RB(a) | ___PPC_RS(lp)) +#define PPC_RAW_TLBIE_5(rb, rs, ric, prs, r) \ + (PPC_INST_TLBIE | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r)) +#define PPC_RAW_TLBIEL(rb, rs, ric, prs, r) \ + (PPC_INST_TLBIEL | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r)) +#define PPC_RAW_TLBSRX_DOT(a, b) (PPC_INST_TLBSRX_DOT | __PPC_RA0(a) | __PPC_RB(b)) +#define PPC_RAW_TLBIVAX(a, b) (PPC_INST_TLBIVAX | __PPC_RA0(a) | __PPC_RB(b)) +#define PPC_RAW_ERATWE(s, a, w) (PPC_INST_ERATWE | __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w)) +#define PPC_RAW_ERATRE(s, a, w) (PPC_INST_ERATRE | __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w)) +#define PPC_RAW_ERATILX(t, a, b) (PPC_INST_ERATILX | __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b)) +#define PPC_RAW_ERATIVAX(s, a, b) (PPC_INST_ERATIVAX | 
__PPC_RS(s) | __PPC_RA0(a) | __PPC_RB(b)) +#define PPC_RAW_ERATSX(t, a, w) (PPC_INST_ERATSX | __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b)) +#define PPC_RAW_ERATSX_DOT(t, a, w) (PPC_INST_ERATSX_DOT | __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b)) +#define PPC_RAW_SLBFEE_DOT(t, b) (PPC_INST_SLBFEE | __PPC_RT(t) | __PPC_RB(b)) +#define __PPC_RAW_SLBFEE_DOT(t, b) (PPC_INST_SLBFEE | ___PPC_RT(t) | ___PPC_RB(b)) +#define PPC_RAW_ICBT(c, a, b) (PPC_INST_ICBT | __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b)) +#define PPC_RAW_LBZCIX(t, a, b) (PPC_INST_LBZCIX | __PPC_RT(t) | __PPC_RA(a) | __PPC_RB(b)) +#define PPC_RAW_STBCIX(s, a, b) (PPC_INST_STBCIX | __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b)) +#define PPC_RAW_DCBFPS(a, b) (PPC_INST_DCBF | ___PPC_RA(a) | ___PPC_RB(b) | (4 << 21)) +#define PPC_RAW_DCBSTPS(a, b) (PPC_INST_DCBF | ___PPC_RA(a) | ___PPC_RB(b) | (6 << 21)) +/* + * Define what the VSX XX1 form instructions will look like, then add + * the 128 bit load store instructions based on that. + */ +#define VSX_XX1(s, a, b) (__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b)) +#define VSX_XX3(t, a, b) (__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b)) +#define PPC_RAW_STXVD2X(s, a, b) (PPC_INST_STXVD2X | VSX_XX1((s), a, b)) +#define PPC_RAW_LXVD2X(s, a, b) (PPC_INST_LXVD2X | VSX_XX1((s), a, b)) +#define PPC_RAW_MFVRD(a, t) (PPC_INST_MFVSRD | VSX_XX1((t) + 32, a, R0)) +#define PPC_RAW_MTVRD(t, a) (PPC_INST_MTVSRD | VSX_XX1((t) + 32, a, R0)) +#define PPC_RAW_VPMSUMW(t, a, b) (PPC_INST_VPMSUMW | VSX_XX3((t), a, b)) +#define PPC_RAW_VPMSUMD(t, a, b) (PPC_INST_VPMSUMD | VSX_XX3((t), a, b)) +#define PPC_RAW_XXLOR(t, a, b) (PPC_INST_XXLOR | VSX_XX3((t), a, b)) +#define PPC_RAW_XXSWAPD(t, a) (PPC_INST_XXSWAPD | VSX_XX3((t), a, a)) +#define PPC_RAW_XVCPSGNDP(t, a, b) ((PPC_INST_XVCPSGNDP | VSX_XX3((t), (a), (b)))) +#define PPC_RAW_VPERMXOR(vrt, vra, vrb, vrc) \ + ((PPC_INST_VPERMXOR | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | (((vrc) & 0x1f) << 6))) +#define PPC_RAW_NAP (PPC_INST_NAP) +#define PPC_RAW_SLEEP (PPC_INST_SLEEP) +#define PPC_RAW_WINKLE (PPC_INST_WINKLE) +#define PPC_RAW_STOP (PPC_INST_STOP) +#define PPC_RAW_CLRBHRB (PPC_INST_CLRBHRB) +#define PPC_RAW_MFBHRBE(r, n) (PPC_INST_BHRBE | __PPC_RT(r) | (((n) & 0x3ff) << 11)) +#define PPC_RAW_TRECHKPT (PPC_INST_TRECHKPT) +#define PPC_RAW_TRECLAIM(r) (PPC_INST_TRECLAIM | __PPC_RA(r)) +#define PPC_RAW_TABORT(r) (PPC_INST_TABORT | __PPC_RA(r)) +#define TMRN(x) ((((x) & 0x1f) << 16) | (((x) & 0x3e0) << 6)) +#define PPC_RAW_MTTMR(tmr, r) (PPC_INST_MTTMR | TMRN(tmr) | ___PPC_RS(r)) +#define PPC_RAW_MFTMR(tmr, r) (PPC_INST_MFTMR | TMRN(tmr) | ___PPC_RT(r)) +#define PPC_RAW_ICSWX(s, a, b) (PPC_INST_ICSWX | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_ICSWEPX(s, a, b) (PPC_INST_ICSWEPX | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_SLBIA(IH) (PPC_INST_SLBIA | (((IH) & 0x7) << 21)) +#define PPC_RAW_VCMPEQUD_RC(vrt, vra, vrb) \ + (PPC_INST_VCMPEQUD | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21) +#define PPC_RAW_VCMPEQUB_RC(vrt, vra, vrb) \ + (PPC_INST_VCMPEQUB | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21) + /* Deal with instructions that older assemblers aren't aware of */ #define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH) #define PPC_CP_ABORT stringify_in_c(.long PPC_INST_CP_ABORT) @@ -547,12 +633,6 @@ #define PPC_PHWSYNC stringify_in_c(.long PPC_INST_PHWSYNC) #define PPC_PLWSYNC stringify_in_c(.long PPC_INST_PLWSYNC) -/* - * Define what the VSX XX1 form instructions will look like, then add - * the 128 
bit load store instructions based on that. - */ -#define VSX_XX1(s, a, b) (__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b)) -#define VSX_XX3(t, a, b) (__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b)) #define STXVD2X(s, a, b) stringify_in_c(.long PPC_INST_STXVD2X | \ VSX_XX1((s), a, b)) #define LXVD2X(s, a, b) stringify_in_c(.long PPC_INST_LXVD2X | \ @@ -597,7 +677,6 @@ | __PPC_RA(r)) /* book3e thread control instructions */ -#define TMRN(x) ((((x) & 0x1f) << 16) | (((x) & 0x3e0) << 6)) #define MTTMR(tmr, r) stringify_in_c(.long PPC_INST_MTTMR | \ TMRN(tmr) | ___PPC_RS(r)) #define MFTMR(tmr, r) stringify_in_c(.long PPC_INST_MFTMR | \ -- cgit v1.2.3 From 1d33dd84080f4a430bde2fc363d9b70f0a010c19 Mon Sep 17 00:00:00 2001 From: Balamuruhan S Date: Wed, 24 Jun 2020 17:00:34 +0530 Subject: powerpc/ppc-opcode: Move ppc instruction encoding from test_emulate_step Few ppc instructions are encoded in test_emulate_step.c, consolidate them and use it from ppc-opcode.h Signed-off-by: Balamuruhan S Tested-by: Naveen N. Rao Acked-by: Naveen N. Rao Acked-by: Sandipan Das Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200624113038.908074-3-bala24@linux.ibm.com --- arch/powerpc/include/asm/ppc-opcode.h | 18 ++++ arch/powerpc/lib/test_emulate_step.c | 155 ++++++++++++---------------------- 2 files changed, 74 insertions(+), 99 deletions(-) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 4aac2165da21..49701d4ea0e1 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -76,6 +76,9 @@ #define __REGA0_R30 30 #define __REGA0_R31 31 +#define IMM_L(i) ((uintptr_t)(i) & 0xffff) +#define IMM_DS(i) ((uintptr_t)(i) & 0xfffc) + /* opcode and xopcode for instructions */ #define OP_TRAP 3 #define OP_TRAP_64 2 @@ -522,6 +525,21 @@ (PPC_INST_VCMPEQUD | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21) #define PPC_RAW_VCMPEQUB_RC(vrt, vra, vrb) \ (PPC_INST_VCMPEQUB | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21) +#define PPC_RAW_LD(r, base, i) (PPC_INST_LD | ___PPC_RT(r) | ___PPC_RA(base) | IMM_DS(i)) +#define PPC_RAW_LWZ(r, base, i) (PPC_INST_LWZ | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) +#define PPC_RAW_LWZX(t, a, b) (PPC_INST_LWZX | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_STD(r, base, i) (PPC_INST_STD | ___PPC_RS(r) | ___PPC_RA(base) | IMM_DS(i)) +#define PPC_RAW_STDCX(s, a, b) (PPC_INST_STDCX | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_LFSX(t, a, b) (PPC_INST_LFSX | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_STFSX(s, a, b) (PPC_INST_STFSX | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_LFDX(t, a, b) (PPC_INST_LFDX | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_STFDX(s, a, b) (PPC_INST_STFDX | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_LVX(t, a, b) (PPC_INST_LVX | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_STVX(s, a, b) (PPC_INST_STVX | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_ADD(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_ADD_DOT(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1) +#define PPC_RAW_ADDC(t, a, b) (PPC_INST_ADDC | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_ADDC_DOT(t, a, b) (PPC_INST_ADDC | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1) /* Deal with instructions that older assemblers aren't aware of */ #define PPC_BCCTR_FLUSH stringify_in_c(.long 
PPC_INST_BCCTR_FLUSH) diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index 46af80279ebc..988c734e7370 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -13,49 +13,6 @@ #include #include -#define IMM_L(i) ((uintptr_t)(i) & 0xffff) -#define IMM_DS(i) ((uintptr_t)(i) & 0xfffc) - -/* - * Defined with TEST_ prefix so it does not conflict with other - * definitions. - */ -#define TEST_LD(r, base, i) ppc_inst(PPC_INST_LD | ___PPC_RT(r) | \ - ___PPC_RA(base) | IMM_DS(i)) -#define TEST_LWZ(r, base, i) ppc_inst(PPC_INST_LWZ | ___PPC_RT(r) | \ - ___PPC_RA(base) | IMM_L(i)) -#define TEST_LWZX(t, a, b) ppc_inst(PPC_INST_LWZX | ___PPC_RT(t) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_STD(r, base, i) ppc_inst(PPC_INST_STD | ___PPC_RS(r) | \ - ___PPC_RA(base) | IMM_DS(i)) -#define TEST_LDARX(t, a, b, eh) ppc_inst(PPC_INST_LDARX | ___PPC_RT(t) | \ - ___PPC_RA(a) | ___PPC_RB(b) | \ - __PPC_EH(eh)) -#define TEST_STDCX(s, a, b) ppc_inst(PPC_INST_STDCX | ___PPC_RS(s) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_LFSX(t, a, b) ppc_inst(PPC_INST_LFSX | ___PPC_RT(t) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_STFSX(s, a, b) ppc_inst(PPC_INST_STFSX | ___PPC_RS(s) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_LFDX(t, a, b) ppc_inst(PPC_INST_LFDX | ___PPC_RT(t) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_STFDX(s, a, b) ppc_inst(PPC_INST_STFDX | ___PPC_RS(s) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_LVX(t, a, b) ppc_inst(PPC_INST_LVX | ___PPC_RT(t) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_STVX(s, a, b) ppc_inst(PPC_INST_STVX | ___PPC_RS(s) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_LXVD2X(s, a, b) ppc_inst(PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b)) -#define TEST_STXVD2X(s, a, b) ppc_inst(PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b)) -#define TEST_ADD(t, a, b) ppc_inst(PPC_INST_ADD | ___PPC_RT(t) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_ADD_DOT(t, a, b) ppc_inst(PPC_INST_ADD | ___PPC_RT(t) | \ - ___PPC_RA(a) | ___PPC_RB(b) | 0x1) -#define TEST_ADDC(t, a, b) ppc_inst(PPC_INST_ADDC | ___PPC_RT(t) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_ADDC_DOT(t, a, b) ppc_inst(PPC_INST_ADDC | ___PPC_RT(t) | \ - ___PPC_RA(a) | ___PPC_RB(b) | 0x1) - #define MAX_SUBTESTS 16 #define IGNORE_GPR(n) (0x1UL << (n)) @@ -105,7 +62,7 @@ static void __init test_ld(void) regs.gpr[3] = (unsigned long) &a; /* ld r5, 0(r3) */ - stepped = emulate_step(®s, TEST_LD(5, 3, 0)); + stepped = emulate_step(®s, ppc_inst(PPC_RAW_LD(5, 3, 0))); if (stepped == 1 && regs.gpr[5] == a) show_result("ld", "PASS"); @@ -123,7 +80,7 @@ static void __init test_lwz(void) regs.gpr[3] = (unsigned long) &a; /* lwz r5, 0(r3) */ - stepped = emulate_step(®s, TEST_LWZ(5, 3, 0)); + stepped = emulate_step(®s, ppc_inst(PPC_RAW_LWZ(5, 3, 0))); if (stepped == 1 && regs.gpr[5] == a) show_result("lwz", "PASS"); @@ -143,7 +100,7 @@ static void __init test_lwzx(void) regs.gpr[5] = 0x8765; /* lwzx r5, r3, r4 */ - stepped = emulate_step(®s, TEST_LWZX(5, 3, 4)); + stepped = emulate_step(®s, ppc_inst(PPC_RAW_LWZX(5, 3, 4))); if (stepped == 1 && regs.gpr[5] == a[2]) show_result("lwzx", "PASS"); else @@ -161,7 +118,7 @@ static void __init test_std(void) regs.gpr[5] = 0x5678; /* std r5, 0(r3) */ - stepped = emulate_step(®s, TEST_STD(5, 3, 0)); + stepped = emulate_step(®s, ppc_inst(PPC_RAW_STD(5, 3, 0))); if (stepped == 1 && regs.gpr[5] == a) show_result("std", "PASS"); else @@ -186,7 +143,7 @@ static void __init test_ldarx_stdcx(void) regs.gpr[5] = 
0x5678; /* ldarx r5, r3, r4, 0 */ - stepped = emulate_step(®s, TEST_LDARX(5, 3, 4, 0)); + stepped = emulate_step(®s, ppc_inst(PPC_RAW_LDARX(5, 3, 4, 0))); /* * Don't touch 'a' here. Touching 'a' can do Load/store @@ -204,7 +161,7 @@ static void __init test_ldarx_stdcx(void) regs.gpr[5] = 0x9ABC; /* stdcx. r5, r3, r4 */ - stepped = emulate_step(®s, TEST_STDCX(5, 3, 4)); + stepped = emulate_step(®s, ppc_inst(PPC_RAW_STDCX(5, 3, 4))); /* * Two possible scenarios that indicates successful emulation @@ -244,7 +201,7 @@ static void __init test_lfsx_stfsx(void) regs.gpr[4] = 0; /* lfsx frt10, r3, r4 */ - stepped = emulate_step(®s, TEST_LFSX(10, 3, 4)); + stepped = emulate_step(®s, ppc_inst(PPC_RAW_LFSX(10, 3, 4))); if (stepped == 1) show_result("lfsx", "PASS"); @@ -257,7 +214,7 @@ static void __init test_lfsx_stfsx(void) c.a = 678.91; /* stfsx frs10, r3, r4 */ - stepped = emulate_step(®s, TEST_STFSX(10, 3, 4)); + stepped = emulate_step(®s, ppc_inst(PPC_RAW_STFSX(10, 3, 4))); if (stepped == 1 && c.b == cached_b) show_result("stfsx", "PASS"); @@ -287,7 +244,7 @@ static void __init test_lfdx_stfdx(void) regs.gpr[4] = 0; /* lfdx frt10, r3, r4 */ - stepped = emulate_step(®s, TEST_LFDX(10, 3, 4)); + stepped = emulate_step(®s, ppc_inst(PPC_RAW_LFDX(10, 3, 4))); if (stepped == 1) show_result("lfdx", "PASS"); @@ -300,7 +257,7 @@ static void __init test_lfdx_stfdx(void) c.a = 987654.32; /* stfdx frs10, r3, r4 */ - stepped = emulate_step(®s, TEST_STFDX(10, 3, 4)); + stepped = emulate_step(®s, ppc_inst(PPC_RAW_STFDX(10, 3, 4))); if (stepped == 1 && c.b == cached_b) show_result("stfdx", "PASS"); @@ -346,7 +303,7 @@ static void __init test_lvx_stvx(void) regs.gpr[4] = 0; /* lvx vrt10, r3, r4 */ - stepped = emulate_step(®s, TEST_LVX(10, 3, 4)); + stepped = emulate_step(®s, ppc_inst(PPC_RAW_LVX(10, 3, 4))); if (stepped == 1) show_result("lvx", "PASS"); @@ -362,7 +319,7 @@ static void __init test_lvx_stvx(void) c.b[3] = 498532; /* stvx vrs10, r3, r4 */ - stepped = emulate_step(®s, TEST_STVX(10, 3, 4)); + stepped = emulate_step(®s, ppc_inst(PPC_RAW_STVX(10, 3, 4))); if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] && cached_b[2] == c.b[2] && cached_b[3] == c.b[3]) @@ -403,7 +360,7 @@ static void __init test_lxvd2x_stxvd2x(void) regs.gpr[4] = 0; /* lxvd2x vsr39, r3, r4 */ - stepped = emulate_step(®s, TEST_LXVD2X(39, 3, 4)); + stepped = emulate_step(®s, ppc_inst(PPC_RAW_LXVD2X(39, R3, R4))); if (stepped == 1 && cpu_has_feature(CPU_FTR_VSX)) { show_result("lxvd2x", "PASS"); @@ -423,7 +380,7 @@ static void __init test_lxvd2x_stxvd2x(void) c.b[3] = 4; /* stxvd2x vsr39, r3, r4 */ - stepped = emulate_step(®s, TEST_STXVD2X(39, 3, 4)); + stepped = emulate_step(®s, ppc_inst(PPC_RAW_STXVD2X(39, R3, R4))); if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] && cached_b[2] == c.b[2] && cached_b[3] == c.b[3] && @@ -485,7 +442,7 @@ static struct compute_test compute_tests[] = { .subtests = { { .descr = "RA = LONG_MIN, RB = LONG_MIN", - .instr = TEST_ADD(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MIN, @@ -493,7 +450,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = LONG_MIN, RB = LONG_MAX", - .instr = TEST_ADD(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MAX, @@ -501,7 +458,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = LONG_MAX, RB = LONG_MAX", - .instr = TEST_ADD(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs 
= { .gpr[21] = LONG_MAX, .gpr[22] = LONG_MAX, @@ -509,7 +466,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = ULONG_MAX, RB = ULONG_MAX", - .instr = TEST_ADD(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs = { .gpr[21] = ULONG_MAX, .gpr[22] = ULONG_MAX, @@ -517,7 +474,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = ULONG_MAX, RB = 0x1", - .instr = TEST_ADD(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs = { .gpr[21] = ULONG_MAX, .gpr[22] = 0x1, @@ -525,7 +482,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = INT_MIN, RB = INT_MIN", - .instr = TEST_ADD(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs = { .gpr[21] = INT_MIN, .gpr[22] = INT_MIN, @@ -533,7 +490,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = INT_MIN, RB = INT_MAX", - .instr = TEST_ADD(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs = { .gpr[21] = INT_MIN, .gpr[22] = INT_MAX, @@ -541,7 +498,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = INT_MAX, RB = INT_MAX", - .instr = TEST_ADD(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs = { .gpr[21] = INT_MAX, .gpr[22] = INT_MAX, @@ -549,7 +506,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = UINT_MAX, RB = UINT_MAX", - .instr = TEST_ADD(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs = { .gpr[21] = UINT_MAX, .gpr[22] = UINT_MAX, @@ -557,7 +514,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = UINT_MAX, RB = 0x1", - .instr = TEST_ADD(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)), .regs = { .gpr[21] = UINT_MAX, .gpr[22] = 0x1, @@ -571,7 +528,7 @@ static struct compute_test compute_tests[] = { { .descr = "RA = LONG_MIN, RB = LONG_MIN", .flags = IGNORE_CCR, - .instr = TEST_ADD_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MIN, @@ -579,7 +536,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = LONG_MIN, RB = LONG_MAX", - .instr = TEST_ADD_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MAX, @@ -588,7 +545,7 @@ static struct compute_test compute_tests[] = { { .descr = "RA = LONG_MAX, RB = LONG_MAX", .flags = IGNORE_CCR, - .instr = TEST_ADD_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = LONG_MAX, .gpr[22] = LONG_MAX, @@ -596,7 +553,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = ULONG_MAX, RB = ULONG_MAX", - .instr = TEST_ADD_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = ULONG_MAX, .gpr[22] = ULONG_MAX, @@ -604,7 +561,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = ULONG_MAX, RB = 0x1", - .instr = TEST_ADD_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = ULONG_MAX, .gpr[22] = 0x1, @@ -612,7 +569,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = INT_MIN, RB = INT_MIN", - .instr = TEST_ADD_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = INT_MIN, .gpr[22] = INT_MIN, @@ -620,7 +577,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = INT_MIN, RB = INT_MAX", - .instr = TEST_ADD_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = INT_MIN, .gpr[22] = INT_MAX, @@ -628,7 +585,7 @@ static struct 
compute_test compute_tests[] = { }, { .descr = "RA = INT_MAX, RB = INT_MAX", - .instr = TEST_ADD_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = INT_MAX, .gpr[22] = INT_MAX, @@ -636,7 +593,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = UINT_MAX, RB = UINT_MAX", - .instr = TEST_ADD_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = UINT_MAX, .gpr[22] = UINT_MAX, @@ -644,7 +601,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = UINT_MAX, RB = 0x1", - .instr = TEST_ADD_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)), .regs = { .gpr[21] = UINT_MAX, .gpr[22] = 0x1, @@ -657,7 +614,7 @@ static struct compute_test compute_tests[] = { .subtests = { { .descr = "RA = LONG_MIN, RB = LONG_MIN", - .instr = TEST_ADDC(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MIN, @@ -665,7 +622,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = LONG_MIN, RB = LONG_MAX", - .instr = TEST_ADDC(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MAX, @@ -673,7 +630,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = LONG_MAX, RB = LONG_MAX", - .instr = TEST_ADDC(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = LONG_MAX, .gpr[22] = LONG_MAX, @@ -681,7 +638,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = ULONG_MAX, RB = ULONG_MAX", - .instr = TEST_ADDC(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = ULONG_MAX, .gpr[22] = ULONG_MAX, @@ -689,7 +646,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = ULONG_MAX, RB = 0x1", - .instr = TEST_ADDC(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = ULONG_MAX, .gpr[22] = 0x1, @@ -697,7 +654,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = INT_MIN, RB = INT_MIN", - .instr = TEST_ADDC(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = INT_MIN, .gpr[22] = INT_MIN, @@ -705,7 +662,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = INT_MIN, RB = INT_MAX", - .instr = TEST_ADDC(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = INT_MIN, .gpr[22] = INT_MAX, @@ -713,7 +670,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = INT_MAX, RB = INT_MAX", - .instr = TEST_ADDC(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = INT_MAX, .gpr[22] = INT_MAX, @@ -721,7 +678,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = UINT_MAX, RB = UINT_MAX", - .instr = TEST_ADDC(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = UINT_MAX, .gpr[22] = UINT_MAX, @@ -729,7 +686,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = UINT_MAX, RB = 0x1", - .instr = TEST_ADDC(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = UINT_MAX, .gpr[22] = 0x1, @@ -737,7 +694,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = LONG_MIN | INT_MIN, RB = LONG_MIN | INT_MIN", - .instr = TEST_ADDC(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN | (uint)INT_MIN, .gpr[22] = LONG_MIN | (uint)INT_MIN, @@ -751,7 +708,7 @@ static struct compute_test compute_tests[] = { { .descr = "RA = LONG_MIN, RB = 
LONG_MIN", .flags = IGNORE_CCR, - .instr = TEST_ADDC_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MIN, @@ -759,7 +716,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = LONG_MIN, RB = LONG_MAX", - .instr = TEST_ADDC_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN, .gpr[22] = LONG_MAX, @@ -768,7 +725,7 @@ static struct compute_test compute_tests[] = { { .descr = "RA = LONG_MAX, RB = LONG_MAX", .flags = IGNORE_CCR, - .instr = TEST_ADDC_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = LONG_MAX, .gpr[22] = LONG_MAX, @@ -776,7 +733,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = ULONG_MAX, RB = ULONG_MAX", - .instr = TEST_ADDC_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = ULONG_MAX, .gpr[22] = ULONG_MAX, @@ -784,7 +741,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = ULONG_MAX, RB = 0x1", - .instr = TEST_ADDC_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = ULONG_MAX, .gpr[22] = 0x1, @@ -792,7 +749,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = INT_MIN, RB = INT_MIN", - .instr = TEST_ADDC_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = INT_MIN, .gpr[22] = INT_MIN, @@ -800,7 +757,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = INT_MIN, RB = INT_MAX", - .instr = TEST_ADDC_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = INT_MIN, .gpr[22] = INT_MAX, @@ -808,7 +765,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = INT_MAX, RB = INT_MAX", - .instr = TEST_ADDC_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = INT_MAX, .gpr[22] = INT_MAX, @@ -816,7 +773,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = UINT_MAX, RB = UINT_MAX", - .instr = TEST_ADDC_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = UINT_MAX, .gpr[22] = UINT_MAX, @@ -824,7 +781,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = UINT_MAX, RB = 0x1", - .instr = TEST_ADDC_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = UINT_MAX, .gpr[22] = 0x1, @@ -832,7 +789,7 @@ static struct compute_test compute_tests[] = { }, { .descr = "RA = LONG_MIN | INT_MIN, RB = LONG_MIN | INT_MIN", - .instr = TEST_ADDC_DOT(20, 21, 22), + .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)), .regs = { .gpr[21] = LONG_MIN | (uint)INT_MIN, .gpr[22] = LONG_MIN | (uint)INT_MIN, -- cgit v1.2.3 From 0654186510a40e7e1fa788cb941d1a156ba2dcb2 Mon Sep 17 00:00:00 2001 From: Balamuruhan S Date: Wed, 24 Jun 2020 17:00:35 +0530 Subject: powerpc/bpf_jit: Reuse instruction macros from ppc-opcode.h Remove duplicate macro definitions from bpf_jit.h and reuse the macros from ppc-opcode.h Signed-off-by: Balamuruhan S Tested-by: Naveen N. Rao Acked-by: Naveen N. 
Rao Acked-by: Sandipan Das Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200624113038.908074-4-bala24@linux.ibm.com --- arch/powerpc/net/bpf_jit.h | 18 +----------------- arch/powerpc/net/bpf_jit32.h | 10 +++++----- arch/powerpc/net/bpf_jit64.h | 4 ++-- arch/powerpc/net/bpf_jit_comp.c | 2 +- arch/powerpc/net/bpf_jit_comp64.c | 20 ++++++++++---------- 5 files changed, 19 insertions(+), 35 deletions(-) diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 55d4377ccfae..535d1de4dfee 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -11,6 +11,7 @@ #ifndef __ASSEMBLY__ #include +#include #ifdef PPC64_ELF_ABI_v1 #define FUNCTION_DESCR_SIZE 24 @@ -26,7 +27,6 @@ #define IMM_H(i) ((uintptr_t)(i)>>16) #define IMM_HA(i) (((uintptr_t)(i)>>16) + \ (((uintptr_t)(i) & 0x8000) >> 15)) -#define IMM_L(i) ((uintptr_t)(i) & 0xffff) #define PLANT_INSTR(d, idx, instr) \ do { if (d) { (d)[idx] = instr; } idx++; } while (0) @@ -45,8 +45,6 @@ #define PPC_ADDIS(d, a, i) EMIT(PPC_INST_ADDIS | \ ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i)) #define PPC_LIS(r, i) PPC_ADDIS(r, 0, i) -#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \ - ___PPC_RA(base) | ((i) & 0xfffc)) #define PPC_STDX(r, base, b) EMIT(PPC_INST_STDX | ___PPC_RS(r) | \ ___PPC_RA(base) | ___PPC_RB(b)) #define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \ @@ -62,12 +60,8 @@ #define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \ ___PPC_RA(base) | IMM_L(i)) -#define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \ - ___PPC_RA(base) | ((i) & 0xfffc)) #define PPC_LDX(r, base, b) EMIT(PPC_INST_LDX | ___PPC_RT(r) | \ ___PPC_RA(base) | ___PPC_RB(b)) -#define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \ - ___PPC_RA(base) | IMM_L(i)) #define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \ ___PPC_RA(base) | IMM_L(i)) #define PPC_LHBRX(r, base, b) EMIT(PPC_INST_LHBRX | ___PPC_RT(r) | \ @@ -75,16 +69,8 @@ #define PPC_LDBRX(r, base, b) EMIT(PPC_INST_LDBRX | ___PPC_RT(r) | \ ___PPC_RA(base) | ___PPC_RB(b)) -#define PPC_BPF_LDARX(t, a, b, eh) EMIT(PPC_INST_LDARX | ___PPC_RT(t) | \ - ___PPC_RA(a) | ___PPC_RB(b) | \ - __PPC_EH(eh)) -#define PPC_BPF_LWARX(t, a, b, eh) EMIT(PPC_INST_LWARX | ___PPC_RT(t) | \ - ___PPC_RA(a) | ___PPC_RB(b) | \ - __PPC_EH(eh)) #define PPC_BPF_STWCX(s, a, b) EMIT(PPC_INST_STWCX | ___PPC_RS(s) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) | \ - ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i)) #define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i)) #define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \ @@ -100,8 +86,6 @@ #define PPC_SUB(d, a, b) EMIT(PPC_INST_SUB | ___PPC_RT(d) | \ ___PPC_RB(a) | ___PPC_RA(b)) -#define PPC_ADD(d, a, b) EMIT(PPC_INST_ADD | ___PPC_RT(d) | \ - ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_MULD(d, a, b) EMIT(PPC_INST_MULLD | ___PPC_RT(d) | \ ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_MULW(d, a, b) EMIT(PPC_INST_MULLW | ___PPC_RT(d) | \ diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h index 4ec2a9f14f84..753c244a7cf9 100644 --- a/arch/powerpc/net/bpf_jit32.h +++ b/arch/powerpc/net/bpf_jit32.h @@ -76,13 +76,13 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); else { PPC_ADDIS(r, base, IMM_HA(i)); \ PPC_LBZ(r, r, IMM_L(i)); } } while(0) -#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \ +#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) 
EMIT(PPC_RAW_LD(r, base, i)); \ else { PPC_ADDIS(r, base, IMM_HA(i)); \ - PPC_LD(r, r, IMM_L(i)); } } while(0) + EMIT(PPC_RAW_LD(r, r, IMM_L(i))); } } while(0) -#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LWZ(r, base, i); \ +#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) EMIT(PPC_RAW_LWZ(r, base, i)); \ else { PPC_ADDIS(r, base, IMM_HA(i)); \ - PPC_LWZ(r, r, IMM_L(i)); } } while(0) + EMIT(PPC_RAW_LWZ(r, r, IMM_L(i))); } } while(0) #define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i); \ else { PPC_ADDIS(r, base, IMM_HA(i)); \ @@ -118,7 +118,7 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); #define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i) #endif -#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0) +#define PPC_BPF_LL(r, base, i) do { EMIT(PPC_RAW_LWZ(r, base, i)); } while(0) #define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0) #define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0) diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h index cf3a7e337f02..c144a3777158 100644 --- a/arch/powerpc/net/bpf_jit64.h +++ b/arch/powerpc/net/bpf_jit64.h @@ -73,14 +73,14 @@ static const int b2p[] = { PPC_LI(b2p[TMP_REG_2], (i)); \ PPC_LDX(r, base, b2p[TMP_REG_2]); \ } else \ - PPC_LD(r, base, i); \ + EMIT(PPC_RAW_LD(r, base, i)); \ } while(0) #define PPC_BPF_STL(r, base, i) do { \ if ((i) % 4) { \ PPC_LI(b2p[TMP_REG_2], (i)); \ PPC_STDX(r, base, b2p[TMP_REG_2]); \ } else \ - PPC_STD(r, base, i); \ + EMIT(PPC_RAW_STD(r, base, i)); \ } while(0) #define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0) diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 0acc9d5fb19e..abcf56c00be5 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -134,7 +134,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, /*** ALU ops ***/ case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */ ctx->seen |= SEEN_XREG; - PPC_ADD(r_A, r_A, r_X); + EMIT(PPC_RAW_ADD(r_A, r_A, r_X)); break; case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */ if (!K) diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index be3517ef0574..f721fbe6ca4d 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -239,7 +239,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 * if (index >= array->map.max_entries) * goto out; */ - PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)); + EMIT(PPC_RAW_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries))); PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31); PPC_CMPLW(b2p_index, b2p[TMP_REG_1]); PPC_BCC(COND_GE, out); @@ -260,7 +260,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 /* prog = array->ptrs[index]; */ PPC_MULI(b2p[TMP_REG_1], b2p_index, 8); - PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array); + EMIT(PPC_RAW_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array)); PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs)); /* @@ -340,7 +340,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, */ case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */ case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */ - PPC_ADD(dst_reg, dst_reg, src_reg); + EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg)); goto bpf_alu32_trunc; case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */ case BPF_ALU64 | BPF_SUB | BPF_X: 
/* dst -= src */ @@ -357,7 +357,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_ADDI(dst_reg, dst_reg, IMM_L(imm)); else { PPC_LI32(b2p[TMP_REG_1], imm); - PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]); + EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1])); } } goto bpf_alu32_trunc; @@ -689,9 +689,9 @@ emit_clear: PPC_ADDI(b2p[TMP_REG_1], dst_reg, off); tmp_idx = ctx->idx * 4; /* load value from memory into TMP_REG_2 */ - PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); + EMIT(PPC_RAW_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0)); /* add value from src_reg into this */ - PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg); + EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg)); /* store result back */ PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); /* we're done if this succeeded */ @@ -701,9 +701,9 @@ emit_clear: case BPF_STX | BPF_XADD | BPF_DW: PPC_ADDI(b2p[TMP_REG_1], dst_reg, off); tmp_idx = ctx->idx * 4; - PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); - PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg); - PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); + EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0)); + EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg)); + EMIT(PPC_RAW_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1])); PPC_BCC_SHORT(COND_NE, tmp_idx); break; @@ -724,7 +724,7 @@ emit_clear: break; /* dst = *(u32 *)(ul) (src + off) */ case BPF_LDX | BPF_MEM | BPF_W: - PPC_LWZ(dst_reg, src_reg, off); + EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off)); if (insn_is_zext(&insn[i + 1])) addrs[++i] = ctx->idx * 4; break; -- cgit v1.2.3 From 3a181237916310b2bbbad158d97933bb2b4e7552 Mon Sep 17 00:00:00 2001 From: Balamuruhan S Date: Wed, 24 Jun 2020 17:00:36 +0530 Subject: powerpc/ppc-opcode: Consolidate powerpc instructions from bpf_jit.h Move macro definitions of powerpc instructions from bpf_jit.h to ppc-opcode.h and adopt the users of the macros accordingly. `PPC_MR()` is defined twice in bpf_jit.h, remove the duplicate one. Signed-off-by: Balamuruhan S Tested-by: Naveen N. Rao Acked-by: Naveen N. Rao Acked-by: Sandipan Das Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200624113038.908074-5-bala24@linux.ibm.com --- arch/powerpc/include/asm/ppc-opcode.h | 85 +++++++++++ arch/powerpc/net/bpf_jit.h | 166 ++------------------ arch/powerpc/net/bpf_jit32.h | 24 +-- arch/powerpc/net/bpf_jit64.h | 12 +- arch/powerpc/net/bpf_jit_comp.c | 132 ++++++++-------- arch/powerpc/net/bpf_jit_comp64.c | 278 +++++++++++++++++----------------- 6 files changed, 324 insertions(+), 373 deletions(-) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 49701d4ea0e1..cb24b758c784 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -79,6 +79,16 @@ #define IMM_L(i) ((uintptr_t)(i) & 0xffff) #define IMM_DS(i) ((uintptr_t)(i) & 0xfffc) +/* + * 16-bit immediate helper macros: HA() is for use with sign-extending instrs + * (e.g. LD, ADDI). If the bottom 16 bits is "-ve", add another bit into the + * top half to negate the effect (i.e. 0xffff + 1 = 0x(1)0000). 
+ */ +#define IMM_H(i) ((uintptr_t)(i)>>16) +#define IMM_HA(i) (((uintptr_t)(i)>>16) + \ + (((uintptr_t)(i) & 0x8000) >> 15)) + + /* opcode and xopcode for instructions */ #define OP_TRAP 3 #define OP_TRAP_64 2 @@ -540,6 +550,81 @@ #define PPC_RAW_ADD_DOT(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1) #define PPC_RAW_ADDC(t, a, b) (PPC_INST_ADDC | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_ADDC_DOT(t, a, b) (PPC_INST_ADDC | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1) +#define PPC_RAW_NOP() (PPC_INST_NOP) +#define PPC_RAW_BLR() (PPC_INST_BLR) +#define PPC_RAW_BLRL() (PPC_INST_BLRL) +#define PPC_RAW_MTLR(r) (PPC_INST_MTLR | ___PPC_RT(r)) +#define PPC_RAW_BCTR() (PPC_INST_BCTR) +#define PPC_RAW_MTCTR(r) (PPC_INST_MTCTR | ___PPC_RT(r)) +#define PPC_RAW_ADDI(d, a, i) (PPC_INST_ADDI | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i)) +#define PPC_RAW_LI(r, i) PPC_RAW_ADDI(r, 0, i) +#define PPC_RAW_ADDIS(d, a, i) (PPC_INST_ADDIS | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i)) +#define PPC_RAW_LIS(r, i) PPC_RAW_ADDIS(r, 0, i) +#define PPC_RAW_STDX(r, base, b) (PPC_INST_STDX | ___PPC_RS(r) | ___PPC_RA(base) | ___PPC_RB(b)) +#define PPC_RAW_STDU(r, base, i) (PPC_INST_STDU | ___PPC_RS(r) | ___PPC_RA(base) | ((i) & 0xfffc)) +#define PPC_RAW_STW(r, base, i) (PPC_INST_STW | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i)) +#define PPC_RAW_STWU(r, base, i) (PPC_INST_STWU | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i)) +#define PPC_RAW_STH(r, base, i) (PPC_INST_STH | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i)) +#define PPC_RAW_STB(r, base, i) (PPC_INST_STB | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i)) +#define PPC_RAW_LBZ(r, base, i) (PPC_INST_LBZ | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) +#define PPC_RAW_LDX(r, base, b) (PPC_INST_LDX | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) +#define PPC_RAW_LHZ(r, base, i) (PPC_INST_LHZ | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) +#define PPC_RAW_LHBRX(r, base, b) (PPC_INST_LHBRX | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) +#define PPC_RAW_LDBRX(r, base, b) (PPC_INST_LDBRX | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) +#define PPC_RAW_STWCX(s, a, b) (PPC_INST_STWCX | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_CMPWI(a, i) (PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i)) +#define PPC_RAW_CMPDI(a, i) (PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i)) +#define PPC_RAW_CMPW(a, b) (PPC_INST_CMPW | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_CMPD(a, b) (PPC_INST_CMPD | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_CMPLWI(a, i) (PPC_INST_CMPLWI | ___PPC_RA(a) | IMM_L(i)) +#define PPC_RAW_CMPLDI(a, i) (PPC_INST_CMPLDI | ___PPC_RA(a) | IMM_L(i)) +#define PPC_RAW_CMPLW(a, b) (PPC_INST_CMPLW | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_CMPLD(a, b) (PPC_INST_CMPLD | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_SUB(d, a, b) (PPC_INST_SUB | ___PPC_RT(d) | ___PPC_RB(a) | ___PPC_RA(b)) +#define PPC_RAW_MULD(d, a, b) (PPC_INST_MULLD | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_MULW(d, a, b) (PPC_INST_MULLW | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_MULHWU(d, a, b) (PPC_INST_MULHWU | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_MULI(d, a, i) (PPC_INST_MULLI | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i)) +#define PPC_RAW_DIVWU(d, a, b) (PPC_INST_DIVWU | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_DIVDU(d, a, b) (PPC_INST_DIVDU | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_AND(d, a, b) (PPC_INST_AND | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) +#define 
PPC_RAW_ANDI(d, a, i) (PPC_INST_ANDI | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) +#define PPC_RAW_AND_DOT(d, a, b) (PPC_INST_ANDDOT | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) +#define PPC_RAW_OR(d, a, b) (PPC_INST_OR | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) +#define PPC_RAW_MR(d, a) PPC_RAW_OR(d, a, a) +#define PPC_RAW_ORI(d, a, i) (PPC_INST_ORI | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) +#define PPC_RAW_ORIS(d, a, i) (PPC_INST_ORIS | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) +#define PPC_RAW_XOR(d, a, b) (PPC_INST_XOR | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) +#define PPC_RAW_XORI(d, a, i) (PPC_INST_XORI | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) +#define PPC_RAW_XORIS(d, a, i) (PPC_INST_XORIS | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) +#define PPC_RAW_EXTSW(d, a) (PPC_INST_EXTSW | ___PPC_RA(d) | ___PPC_RS(a)) +#define PPC_RAW_SLW(d, a, s) (PPC_INST_SLW | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) +#define PPC_RAW_SLD(d, a, s) (PPC_INST_SLD | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) +#define PPC_RAW_SRW(d, a, s) (PPC_INST_SRW | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) +#define PPC_RAW_SRAW(d, a, s) (PPC_INST_SRAW | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) +#define PPC_RAW_SRAWI(d, a, i) (PPC_INST_SRAWI | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i)) +#define PPC_RAW_SRD(d, a, s) (PPC_INST_SRD | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) +#define PPC_RAW_SRAD(d, a, s) (PPC_INST_SRAD | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) +#define PPC_RAW_SRADI(d, a, i) (PPC_INST_SRADI | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i)) +#define PPC_RAW_RLWINM(d, a, i, mb, me) \ + (PPC_INST_RLWINM | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me)) +#define PPC_RAW_RLWINM_DOT(d, a, i, mb, me) \ + (PPC_INST_RLWINM_DOT | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me)) +#define PPC_RAW_RLWIMI(d, a, i, mb, me) \ + (PPC_INST_RLWIMI | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me)) +#define PPC_RAW_RLDICL(d, a, i, mb) (PPC_INST_RLDICL | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_MB64(mb)) +#define PPC_RAW_RLDICR(d, a, i, me) (PPC_INST_RLDICR | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_ME64(me)) + +/* slwi = rlwinm Rx, Ry, n, 0, 31-n */ +#define PPC_RAW_SLWI(d, a, i) PPC_RAW_RLWINM(d, a, i, 0, 31-(i)) +/* srwi = rlwinm Rx, Ry, 32-n, n, 31 */ +#define PPC_RAW_SRWI(d, a, i) PPC_RAW_RLWINM(d, a, 32-(i), i, 31) +/* sldi = rldicr Rx, Ry, n, 63-n */ +#define PPC_RAW_SLDI(d, a, i) PPC_RAW_RLDICR(d, a, i, 63-(i)) +/* sldi = rldicl Rx, Ry, 64-n, n */ +#define PPC_RAW_SRDI(d, a, i) PPC_RAW_RLDICL(d, a, 64-(i), i) + +#define PPC_RAW_NEG(d, a) (PPC_INST_NEG | ___PPC_RT(d) | ___PPC_RA(a)) /* Deal with instructions that older assemblers aren't aware of */ #define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH) diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 535d1de4dfee..d0a67a1bbaf1 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -19,150 +19,10 @@ #define FUNCTION_DESCR_SIZE 0 #endif -/* - * 16-bit immediate helper macros: HA() is for use with sign-extending instrs - * (e.g. LD, ADDI). If the bottom 16 bits is "-ve", add another bit into the - * top half to negate the effect (i.e. 0xffff + 1 = 0x(1)0000). 
- */ -#define IMM_H(i) ((uintptr_t)(i)>>16) -#define IMM_HA(i) (((uintptr_t)(i)>>16) + \ - (((uintptr_t)(i) & 0x8000) >> 15)) - #define PLANT_INSTR(d, idx, instr) \ do { if (d) { (d)[idx] = instr; } idx++; } while (0) #define EMIT(instr) PLANT_INSTR(image, ctx->idx, instr) -#define PPC_NOP() EMIT(PPC_INST_NOP) -#define PPC_BLR() EMIT(PPC_INST_BLR) -#define PPC_BLRL() EMIT(PPC_INST_BLRL) -#define PPC_MTLR(r) EMIT(PPC_INST_MTLR | ___PPC_RT(r)) -#define PPC_BCTR() EMIT(PPC_INST_BCTR) -#define PPC_MTCTR(r) EMIT(PPC_INST_MTCTR | ___PPC_RT(r)) -#define PPC_ADDI(d, a, i) EMIT(PPC_INST_ADDI | ___PPC_RT(d) | \ - ___PPC_RA(a) | IMM_L(i)) -#define PPC_MR(d, a) PPC_OR(d, a, a) -#define PPC_LI(r, i) PPC_ADDI(r, 0, i) -#define PPC_ADDIS(d, a, i) EMIT(PPC_INST_ADDIS | \ - ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i)) -#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i) -#define PPC_STDX(r, base, b) EMIT(PPC_INST_STDX | ___PPC_RS(r) | \ - ___PPC_RA(base) | ___PPC_RB(b)) -#define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \ - ___PPC_RA(base) | ((i) & 0xfffc)) -#define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \ - ___PPC_RA(base) | IMM_L(i)) -#define PPC_STWU(r, base, i) EMIT(PPC_INST_STWU | ___PPC_RS(r) | \ - ___PPC_RA(base) | IMM_L(i)) -#define PPC_STH(r, base, i) EMIT(PPC_INST_STH | ___PPC_RS(r) | \ - ___PPC_RA(base) | IMM_L(i)) -#define PPC_STB(r, base, i) EMIT(PPC_INST_STB | ___PPC_RS(r) | \ - ___PPC_RA(base) | IMM_L(i)) - -#define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \ - ___PPC_RA(base) | IMM_L(i)) -#define PPC_LDX(r, base, b) EMIT(PPC_INST_LDX | ___PPC_RT(r) | \ - ___PPC_RA(base) | ___PPC_RB(b)) -#define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \ - ___PPC_RA(base) | IMM_L(i)) -#define PPC_LHBRX(r, base, b) EMIT(PPC_INST_LHBRX | ___PPC_RT(r) | \ - ___PPC_RA(base) | ___PPC_RB(b)) -#define PPC_LDBRX(r, base, b) EMIT(PPC_INST_LDBRX | ___PPC_RT(r) | \ - ___PPC_RA(base) | ___PPC_RB(b)) - -#define PPC_BPF_STWCX(s, a, b) EMIT(PPC_INST_STWCX | ___PPC_RS(s) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i)) -#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i)) -#define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \ - ___PPC_RB(b)) -#define PPC_CMPD(a, b) EMIT(PPC_INST_CMPD | ___PPC_RA(a) | \ - ___PPC_RB(b)) -#define PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | ___PPC_RA(a) | IMM_L(i)) -#define PPC_CMPLDI(a, i) EMIT(PPC_INST_CMPLDI | ___PPC_RA(a) | IMM_L(i)) -#define PPC_CMPLW(a, b) EMIT(PPC_INST_CMPLW | ___PPC_RA(a) | \ - ___PPC_RB(b)) -#define PPC_CMPLD(a, b) EMIT(PPC_INST_CMPLD | ___PPC_RA(a) | \ - ___PPC_RB(b)) - -#define PPC_SUB(d, a, b) EMIT(PPC_INST_SUB | ___PPC_RT(d) | \ - ___PPC_RB(a) | ___PPC_RA(b)) -#define PPC_MULD(d, a, b) EMIT(PPC_INST_MULLD | ___PPC_RT(d) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_MULW(d, a, b) EMIT(PPC_INST_MULLW | ___PPC_RT(d) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_MULHWU(d, a, b) EMIT(PPC_INST_MULHWU | ___PPC_RT(d) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_MULI(d, a, i) EMIT(PPC_INST_MULLI | ___PPC_RT(d) | \ - ___PPC_RA(a) | IMM_L(i)) -#define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | ___PPC_RT(d) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_DIVDU(d, a, b) EMIT(PPC_INST_DIVDU | ___PPC_RT(d) | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_AND(d, a, b) EMIT(PPC_INST_AND | ___PPC_RA(d) | \ - ___PPC_RS(a) | ___PPC_RB(b)) -#define PPC_ANDI(d, a, i) EMIT(PPC_INST_ANDI | ___PPC_RA(d) | \ - ___PPC_RS(a) | IMM_L(i)) -#define PPC_AND_DOT(d, a, b) 
EMIT(PPC_INST_ANDDOT | ___PPC_RA(d) | \ - ___PPC_RS(a) | ___PPC_RB(b)) -#define PPC_OR(d, a, b) EMIT(PPC_INST_OR | ___PPC_RA(d) | \ - ___PPC_RS(a) | ___PPC_RB(b)) -#define PPC_MR(d, a) PPC_OR(d, a, a) -#define PPC_ORI(d, a, i) EMIT(PPC_INST_ORI | ___PPC_RA(d) | \ - ___PPC_RS(a) | IMM_L(i)) -#define PPC_ORIS(d, a, i) EMIT(PPC_INST_ORIS | ___PPC_RA(d) | \ - ___PPC_RS(a) | IMM_L(i)) -#define PPC_XOR(d, a, b) EMIT(PPC_INST_XOR | ___PPC_RA(d) | \ - ___PPC_RS(a) | ___PPC_RB(b)) -#define PPC_XORI(d, a, i) EMIT(PPC_INST_XORI | ___PPC_RA(d) | \ - ___PPC_RS(a) | IMM_L(i)) -#define PPC_XORIS(d, a, i) EMIT(PPC_INST_XORIS | ___PPC_RA(d) | \ - ___PPC_RS(a) | IMM_L(i)) -#define PPC_EXTSW(d, a) EMIT(PPC_INST_EXTSW | ___PPC_RA(d) | \ - ___PPC_RS(a)) -#define PPC_SLW(d, a, s) EMIT(PPC_INST_SLW | ___PPC_RA(d) | \ - ___PPC_RS(a) | ___PPC_RB(s)) -#define PPC_SLD(d, a, s) EMIT(PPC_INST_SLD | ___PPC_RA(d) | \ - ___PPC_RS(a) | ___PPC_RB(s)) -#define PPC_SRW(d, a, s) EMIT(PPC_INST_SRW | ___PPC_RA(d) | \ - ___PPC_RS(a) | ___PPC_RB(s)) -#define PPC_SRAW(d, a, s) EMIT(PPC_INST_SRAW | ___PPC_RA(d) | \ - ___PPC_RS(a) | ___PPC_RB(s)) -#define PPC_SRAWI(d, a, i) EMIT(PPC_INST_SRAWI | ___PPC_RA(d) | \ - ___PPC_RS(a) | __PPC_SH(i)) -#define PPC_SRD(d, a, s) EMIT(PPC_INST_SRD | ___PPC_RA(d) | \ - ___PPC_RS(a) | ___PPC_RB(s)) -#define PPC_SRAD(d, a, s) EMIT(PPC_INST_SRAD | ___PPC_RA(d) | \ - ___PPC_RS(a) | ___PPC_RB(s)) -#define PPC_SRADI(d, a, i) EMIT(PPC_INST_SRADI | ___PPC_RA(d) | \ - ___PPC_RS(a) | __PPC_SH64(i)) -#define PPC_RLWINM(d, a, i, mb, me) EMIT(PPC_INST_RLWINM | ___PPC_RA(d) | \ - ___PPC_RS(a) | __PPC_SH(i) | \ - __PPC_MB(mb) | __PPC_ME(me)) -#define PPC_RLWINM_DOT(d, a, i, mb, me) EMIT(PPC_INST_RLWINM_DOT | \ - ___PPC_RA(d) | ___PPC_RS(a) | \ - __PPC_SH(i) | __PPC_MB(mb) | \ - __PPC_ME(me)) -#define PPC_RLWIMI(d, a, i, mb, me) EMIT(PPC_INST_RLWIMI | ___PPC_RA(d) | \ - ___PPC_RS(a) | __PPC_SH(i) | \ - __PPC_MB(mb) | __PPC_ME(me)) -#define PPC_RLDICL(d, a, i, mb) EMIT(PPC_INST_RLDICL | ___PPC_RA(d) | \ - ___PPC_RS(a) | __PPC_SH64(i) | \ - __PPC_MB64(mb)) -#define PPC_RLDICR(d, a, i, me) EMIT(PPC_INST_RLDICR | ___PPC_RA(d) | \ - ___PPC_RS(a) | __PPC_SH64(i) | \ - __PPC_ME64(me)) - -/* slwi = rlwinm Rx, Ry, n, 0, 31-n */ -#define PPC_SLWI(d, a, i) PPC_RLWINM(d, a, i, 0, 31-(i)) -/* srwi = rlwinm Rx, Ry, 32-n, n, 31 */ -#define PPC_SRWI(d, a, i) PPC_RLWINM(d, a, 32-(i), i, 31) -/* sldi = rldicr Rx, Ry, n, 63-n */ -#define PPC_SLDI(d, a, i) PPC_RLDICR(d, a, i, 63-(i)) -/* sldi = rldicl Rx, Ry, 64-n, n */ -#define PPC_SRDI(d, a, i) PPC_RLDICL(d, a, 64-(i), i) - -#define PPC_NEG(d, a) EMIT(PPC_INST_NEG | ___PPC_RT(d) | ___PPC_RA(a)) - /* Long jump; (unconditional 'branch') */ #define PPC_JMP(dest) EMIT(PPC_INST_BRANCH | \ (((dest) - (ctx->idx * 4)) & 0x03fffffc)) @@ -175,11 +35,11 @@ #define PPC_LI32(d, i) do { \ if ((int)(uintptr_t)(i) >= -32768 && \ (int)(uintptr_t)(i) < 32768) \ - PPC_LI(d, i); \ + EMIT(PPC_RAW_LI(d, i)); \ else { \ - PPC_LIS(d, IMM_H(i)); \ + EMIT(PPC_RAW_LIS(d, IMM_H(i))); \ if (IMM_L(i)) \ - PPC_ORI(d, d, IMM_L(i)); \ + EMIT(PPC_RAW_ORI(d, d, IMM_L(i))); \ } } while(0) #define PPC_LI64(d, i) do { \ @@ -188,19 +48,21 @@ PPC_LI32(d, i); \ else { \ if (!((uintptr_t)(i) & 0xffff800000000000ULL)) \ - PPC_LI(d, ((uintptr_t)(i) >> 32) & 0xffff); \ + EMIT(PPC_RAW_LI(d, ((uintptr_t)(i) >> 32) & \ + 0xffff)); \ else { \ - PPC_LIS(d, ((uintptr_t)(i) >> 48)); \ + EMIT(PPC_RAW_LIS(d, ((uintptr_t)(i) >> 48))); \ if ((uintptr_t)(i) & 0x0000ffff00000000ULL) \ - PPC_ORI(d, d, \ - ((uintptr_t)(i) >> 32) & 
0xffff); \ + EMIT(PPC_RAW_ORI(d, d, \ + ((uintptr_t)(i) >> 32) & 0xffff)); \ } \ - PPC_SLDI(d, d, 32); \ + EMIT(PPC_RAW_SLDI(d, d, 32)); \ if ((uintptr_t)(i) & 0x00000000ffff0000ULL) \ - PPC_ORIS(d, d, \ - ((uintptr_t)(i) >> 16) & 0xffff); \ + EMIT(PPC_RAW_ORIS(d, d, \ + ((uintptr_t)(i) >> 16) & 0xffff)); \ if ((uintptr_t)(i) & 0x000000000000ffffULL) \ - PPC_ORI(d, d, (uintptr_t)(i) & 0xffff); \ + EMIT(PPC_RAW_ORI(d, d, (uintptr_t)(i) & \ + 0xffff)); \ } } while (0) #ifdef CONFIG_PPC64 @@ -224,7 +86,7 @@ static inline bool is_nearbranch(int offset) #define PPC_BCC(cond, dest) do { \ if (is_nearbranch((dest) - (ctx->idx * 4))) { \ PPC_BCC_SHORT(cond, dest); \ - PPC_NOP(); \ + EMIT(PPC_RAW_NOP()); \ } else { \ /* Flip the 'T or F' bit to invert comparison */ \ PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, (ctx->idx+2)*4); \ diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h index 753c244a7cf9..448dfd4d98e1 100644 --- a/arch/powerpc/net/bpf_jit32.h +++ b/arch/powerpc/net/bpf_jit32.h @@ -72,21 +72,21 @@ DECLARE_LOAD_FUNC(sk_load_half); DECLARE_LOAD_FUNC(sk_load_byte); DECLARE_LOAD_FUNC(sk_load_byte_msh); -#define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i); \ - else { PPC_ADDIS(r, base, IMM_HA(i)); \ - PPC_LBZ(r, r, IMM_L(i)); } } while(0) +#define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) EMIT(PPC_RAW_LBZ(r, base, i)); \ + else { EMIT(PPC_RAW_ADDIS(r, base, IMM_HA(i))); \ + EMIT(PPC_RAW_LBZ(r, r, IMM_L(i))); } } while(0) #define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) EMIT(PPC_RAW_LD(r, base, i)); \ - else { PPC_ADDIS(r, base, IMM_HA(i)); \ + else { EMIT(PPC_RAW_ADDIS(r, base, IMM_HA(i))); \ EMIT(PPC_RAW_LD(r, r, IMM_L(i))); } } while(0) #define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) EMIT(PPC_RAW_LWZ(r, base, i)); \ - else { PPC_ADDIS(r, base, IMM_HA(i)); \ + else { EMIT(PPC_RAW_ADDIS(r, base, IMM_HA(i))); \ EMIT(PPC_RAW_LWZ(r, r, IMM_L(i))); } } while(0) -#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i); \ - else { PPC_ADDIS(r, base, IMM_HA(i)); \ - PPC_LHZ(r, r, IMM_L(i)); } } while(0) +#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) EMIT(PPC_RAW_LHZ(r, base, i)); \ + else { EMIT(PPC_RAW_ADDIS(r, base, IMM_HA(i))); \ + EMIT(PPC_RAW_LHZ(r, r, IMM_L(i))); } } while(0) #ifdef CONFIG_PPC64 #define PPC_LL_OFFS(r, base, i) do { PPC_LD_OFFS(r, base, i); } while(0) @@ -107,11 +107,11 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); } while(0) #endif #else -#define PPC_BPF_LOAD_CPU(r) do { PPC_LI(r, 0); } while(0) +#define PPC_BPF_LOAD_CPU(r) do { EMIT(PPC_RAW_LI(r, 0)); } while(0) #endif #define PPC_LHBRX_OFFS(r, base, i) \ - do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0) + do { PPC_LI32(r, i); EMIT(PPC_RAW_LHBRX(r, r, base)); } while(0) #ifdef __LITTLE_ENDIAN__ #define PPC_NTOHS_OFFS(r, base, i) PPC_LHBRX_OFFS(r, base, i) #else @@ -119,8 +119,8 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); #endif #define PPC_BPF_LL(r, base, i) do { EMIT(PPC_RAW_LWZ(r, base, i)); } while(0) -#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0) -#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0) +#define PPC_BPF_STL(r, base, i) do { EMIT(PPC_RAW_STW(r, base, i)); } while(0) +#define PPC_BPF_STLU(r, base, i) do { EMIT(PPC_RAW_STWU(r, base, i)); } while(0) #define SEEN_DATAREF 0x10000 /* might call external helpers */ #define SEEN_XREG 0x20000 /* X reg is used */ diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h index c144a3777158..2e33c6673ff9 100644 --- 
a/arch/powerpc/net/bpf_jit64.h +++ b/arch/powerpc/net/bpf_jit64.h @@ -70,19 +70,21 @@ static const int b2p[] = { */ #define PPC_BPF_LL(r, base, i) do { \ if ((i) % 4) { \ - PPC_LI(b2p[TMP_REG_2], (i)); \ - PPC_LDX(r, base, b2p[TMP_REG_2]); \ + EMIT(PPC_RAW_LI(b2p[TMP_REG_2], (i)));\ + EMIT(PPC_RAW_LDX(r, base, \ + b2p[TMP_REG_2])); \ } else \ EMIT(PPC_RAW_LD(r, base, i)); \ } while(0) #define PPC_BPF_STL(r, base, i) do { \ if ((i) % 4) { \ - PPC_LI(b2p[TMP_REG_2], (i)); \ - PPC_STDX(r, base, b2p[TMP_REG_2]); \ + EMIT(PPC_RAW_LI(b2p[TMP_REG_2], (i)));\ + EMIT(PPC_RAW_STDX(r, base, \ + b2p[TMP_REG_2])); \ } else \ EMIT(PPC_RAW_STD(r, base, i)); \ } while(0) -#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0) +#define PPC_BPF_STLU(r, base, i) do { EMIT(PPC_RAW_STDU(r, base, i)); } while(0) #define SEEN_FUNC 0x1000 /* might call external helpers */ #define SEEN_STACK 0x2000 /* uses BPF stack */ diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index abcf56c00be5..16d09b36fe06 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -61,7 +61,7 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image, PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, data_len)); PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len)); - PPC_SUB(r_HL, r_HL, r_scratch1); + EMIT(PPC_RAW_SUB(r_HL, r_HL, r_scratch1)); PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data)); } @@ -70,12 +70,12 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image, * TODO: Could also detect whether first instr. sets X and * avoid this (as below, with A). */ - PPC_LI(r_X, 0); + EMIT(PPC_RAW_LI(r_X, 0)); } /* make sure we dont leak kernel information to user */ if (bpf_needs_clear_a(&filter[0])) - PPC_LI(r_A, 0); + EMIT(PPC_RAW_LI(r_A, 0)); } static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) @@ -83,10 +83,10 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) int i; if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) { - PPC_ADDI(1, 1, BPF_PPC_STACKFRAME); + EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME)); if (ctx->seen & SEEN_DATAREF) { PPC_BPF_LL(0, 1, PPC_LR_STKOFF); - PPC_MTLR(0); + EMIT(PPC_RAW_MTLR(0)); PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D))); PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL))); } @@ -100,7 +100,7 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) } /* The RETs have left a return value in R3. 
*/ - PPC_BLR(); + EMIT(PPC_RAW_BLR()); } #define CHOOSE_LOAD_FUNC(K, func) \ @@ -139,119 +139,119 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */ if (!K) break; - PPC_ADDI(r_A, r_A, IMM_L(K)); + EMIT(PPC_RAW_ADDI(r_A, r_A, IMM_L(K))); if (K >= 32768) - PPC_ADDIS(r_A, r_A, IMM_HA(K)); + EMIT(PPC_RAW_ADDIS(r_A, r_A, IMM_HA(K))); break; case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */ ctx->seen |= SEEN_XREG; - PPC_SUB(r_A, r_A, r_X); + EMIT(PPC_RAW_SUB(r_A, r_A, r_X)); break; case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */ if (!K) break; - PPC_ADDI(r_A, r_A, IMM_L(-K)); + EMIT(PPC_RAW_ADDI(r_A, r_A, IMM_L(-K))); if (K >= 32768) - PPC_ADDIS(r_A, r_A, IMM_HA(-K)); + EMIT(PPC_RAW_ADDIS(r_A, r_A, IMM_HA(-K))); break; case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */ ctx->seen |= SEEN_XREG; - PPC_MULW(r_A, r_A, r_X); + EMIT(PPC_RAW_MULW(r_A, r_A, r_X)); break; case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */ if (K < 32768) - PPC_MULI(r_A, r_A, K); + EMIT(PPC_RAW_MULI(r_A, r_A, K)); else { PPC_LI32(r_scratch1, K); - PPC_MULW(r_A, r_A, r_scratch1); + EMIT(PPC_RAW_MULW(r_A, r_A, r_scratch1)); } break; case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */ case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */ ctx->seen |= SEEN_XREG; - PPC_CMPWI(r_X, 0); + EMIT(PPC_RAW_CMPWI(r_X, 0)); if (ctx->pc_ret0 != -1) { PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]); } else { PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12); - PPC_LI(r_ret, 0); + EMIT(PPC_RAW_LI(r_ret, 0)); PPC_JMP(exit_addr); } if (code == (BPF_ALU | BPF_MOD | BPF_X)) { - PPC_DIVWU(r_scratch1, r_A, r_X); - PPC_MULW(r_scratch1, r_X, r_scratch1); - PPC_SUB(r_A, r_A, r_scratch1); + EMIT(PPC_RAW_DIVWU(r_scratch1, r_A, r_X)); + EMIT(PPC_RAW_MULW(r_scratch1, r_X, r_scratch1)); + EMIT(PPC_RAW_SUB(r_A, r_A, r_scratch1)); } else { - PPC_DIVWU(r_A, r_A, r_X); + EMIT(PPC_RAW_DIVWU(r_A, r_A, r_X)); } break; case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */ PPC_LI32(r_scratch2, K); - PPC_DIVWU(r_scratch1, r_A, r_scratch2); - PPC_MULW(r_scratch1, r_scratch2, r_scratch1); - PPC_SUB(r_A, r_A, r_scratch1); + EMIT(PPC_RAW_DIVWU(r_scratch1, r_A, r_scratch2)); + EMIT(PPC_RAW_MULW(r_scratch1, r_scratch2, r_scratch1)); + EMIT(PPC_RAW_SUB(r_A, r_A, r_scratch1)); break; case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */ if (K == 1) break; PPC_LI32(r_scratch1, K); - PPC_DIVWU(r_A, r_A, r_scratch1); + EMIT(PPC_RAW_DIVWU(r_A, r_A, r_scratch1)); break; case BPF_ALU | BPF_AND | BPF_X: ctx->seen |= SEEN_XREG; - PPC_AND(r_A, r_A, r_X); + EMIT(PPC_RAW_AND(r_A, r_A, r_X)); break; case BPF_ALU | BPF_AND | BPF_K: if (!IMM_H(K)) - PPC_ANDI(r_A, r_A, K); + EMIT(PPC_RAW_ANDI(r_A, r_A, K)); else { PPC_LI32(r_scratch1, K); - PPC_AND(r_A, r_A, r_scratch1); + EMIT(PPC_RAW_AND(r_A, r_A, r_scratch1)); } break; case BPF_ALU | BPF_OR | BPF_X: ctx->seen |= SEEN_XREG; - PPC_OR(r_A, r_A, r_X); + EMIT(PPC_RAW_OR(r_A, r_A, r_X)); break; case BPF_ALU | BPF_OR | BPF_K: if (IMM_L(K)) - PPC_ORI(r_A, r_A, IMM_L(K)); + EMIT(PPC_RAW_ORI(r_A, r_A, IMM_L(K))); if (K >= 65536) - PPC_ORIS(r_A, r_A, IMM_H(K)); + EMIT(PPC_RAW_ORIS(r_A, r_A, IMM_H(K))); break; case BPF_ANC | SKF_AD_ALU_XOR_X: case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */ ctx->seen |= SEEN_XREG; - PPC_XOR(r_A, r_A, r_X); + EMIT(PPC_RAW_XOR(r_A, r_A, r_X)); break; case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */ if (IMM_L(K)) - PPC_XORI(r_A, r_A, IMM_L(K)); + EMIT(PPC_RAW_XORI(r_A, r_A, IMM_L(K))); if (K >= 65536) - PPC_XORIS(r_A, r_A, IMM_H(K)); + EMIT(PPC_RAW_XORIS(r_A, r_A, IMM_H(K))); break; case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */ 
ctx->seen |= SEEN_XREG; - PPC_SLW(r_A, r_A, r_X); + EMIT(PPC_RAW_SLW(r_A, r_A, r_X)); break; case BPF_ALU | BPF_LSH | BPF_K: if (K == 0) break; else - PPC_SLWI(r_A, r_A, K); + EMIT(PPC_RAW_SLWI(r_A, r_A, K)); break; case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */ ctx->seen |= SEEN_XREG; - PPC_SRW(r_A, r_A, r_X); + EMIT(PPC_RAW_SRW(r_A, r_A, r_X)); break; case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */ if (K == 0) break; else - PPC_SRWI(r_A, r_A, K); + EMIT(PPC_RAW_SRWI(r_A, r_A, K)); break; case BPF_ALU | BPF_NEG: - PPC_NEG(r_A, r_A); + EMIT(PPC_RAW_NEG(r_A, r_A)); break; case BPF_RET | BPF_K: PPC_LI32(r_ret, K); @@ -277,24 +277,24 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, if (ctx->seen) PPC_JMP(exit_addr); else - PPC_BLR(); + EMIT(PPC_RAW_BLR()); } break; case BPF_RET | BPF_A: - PPC_MR(r_ret, r_A); + EMIT(PPC_RAW_MR(r_ret, r_A)); if (i != flen - 1) { if (ctx->seen) PPC_JMP(exit_addr); else - PPC_BLR(); + EMIT(PPC_RAW_BLR()); } break; case BPF_MISC | BPF_TAX: /* X = A */ - PPC_MR(r_X, r_A); + EMIT(PPC_RAW_MR(r_X, r_A)); break; case BPF_MISC | BPF_TXA: /* A = X */ ctx->seen |= SEEN_XREG; - PPC_MR(r_A, r_X); + EMIT(PPC_RAW_MR(r_A, r_X)); break; /*** Constant loads/M[] access ***/ @@ -305,19 +305,19 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_LI32(r_X, K); break; case BPF_LD | BPF_MEM: /* A = mem[K] */ - PPC_MR(r_A, r_M + (K & 0xf)); + EMIT(PPC_RAW_MR(r_A, r_M + (K & 0xf))); ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); break; case BPF_LDX | BPF_MEM: /* X = mem[K] */ - PPC_MR(r_X, r_M + (K & 0xf)); + EMIT(PPC_RAW_MR(r_X, r_M + (K & 0xf))); ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); break; case BPF_ST: /* mem[K] = A */ - PPC_MR(r_M + (K & 0xf), r_A); + EMIT(PPC_RAW_MR(r_M + (K & 0xf), r_A)); ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); break; case BPF_STX: /* mem[K] = X */ - PPC_MR(r_M + (K & 0xf), r_X); + EMIT(PPC_RAW_MR(r_M + (K & 0xf), r_X)); ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf)); break; case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */ @@ -346,13 +346,13 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, type) != 2); PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, dev)); - PPC_CMPDI(r_scratch1, 0); + EMIT(PPC_RAW_CMPDI(r_scratch1, 0)); if (ctx->pc_ret0 != -1) { PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]); } else { /* Exit, returning 0; first pass hits here. */ PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12); - PPC_LI(r_ret, 0); + EMIT(PPC_RAW_LI(r_ret, 0)); PPC_JMP(exit_addr); } if (code == (BPF_ANC | SKF_AD_IFINDEX)) { @@ -383,9 +383,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: PPC_LBZ_OFFS(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET()); if (PKT_VLAN_PRESENT_BIT) - PPC_SRWI(r_A, r_A, PKT_VLAN_PRESENT_BIT); + EMIT(PPC_RAW_SRWI(r_A, r_A, PKT_VLAN_PRESENT_BIT)); if (PKT_VLAN_PRESENT_BIT < 7) - PPC_ANDI(r_A, r_A, 1); + EMIT(PPC_RAW_ANDI(r_A, r_A, 1)); break; case BPF_ANC | SKF_AD_QUEUE: BUILD_BUG_ON(sizeof_field(struct sk_buff, @@ -395,8 +395,8 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, break; case BPF_ANC | SKF_AD_PKTTYPE: PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET()); - PPC_ANDI(r_A, r_A, PKT_TYPE_MAX); - PPC_SRWI(r_A, r_A, 5); + EMIT(PPC_RAW_ANDI(r_A, r_A, PKT_TYPE_MAX)); + EMIT(PPC_RAW_SRWI(r_A, r_A, 5)); break; case BPF_ANC | SKF_AD_CPU: PPC_BPF_LOAD_CPU(r_A); @@ -414,9 +414,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, /* Load from [K]. 
*/ ctx->seen |= SEEN_DATAREF; PPC_FUNC_ADDR(r_scratch1, func); - PPC_MTLR(r_scratch1); + EMIT(PPC_RAW_MTLR(r_scratch1)); PPC_LI32(r_addr, K); - PPC_BLRL(); + EMIT(PPC_RAW_BLRL()); /* * Helper returns 'lt' condition on error, and an * appropriate return value in r3 @@ -440,11 +440,11 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, */ ctx->seen |= SEEN_DATAREF | SEEN_XREG; PPC_FUNC_ADDR(r_scratch1, func); - PPC_MTLR(r_scratch1); - PPC_ADDI(r_addr, r_X, IMM_L(K)); + EMIT(PPC_RAW_MTLR(r_scratch1)); + EMIT(PPC_RAW_ADDI(r_addr, r_X, IMM_L(K))); if (K >= 32768) - PPC_ADDIS(r_addr, r_addr, IMM_HA(K)); - PPC_BLRL(); + EMIT(PPC_RAW_ADDIS(r_addr, r_addr, IMM_HA(K))); + EMIT(PPC_RAW_BLRL()); /* If error, cr0.LT set */ PPC_BCC(COND_LT, exit_addr); break; @@ -489,30 +489,30 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JEQ | BPF_X: ctx->seen |= SEEN_XREG; - PPC_CMPLW(r_A, r_X); + EMIT(PPC_RAW_CMPLW(r_A, r_X)); break; case BPF_JMP | BPF_JSET | BPF_X: ctx->seen |= SEEN_XREG; - PPC_AND_DOT(r_scratch1, r_A, r_X); + EMIT(PPC_RAW_AND_DOT(r_scratch1, r_A, r_X)); break; case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JGT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: if (K < 32768) - PPC_CMPLWI(r_A, K); + EMIT(PPC_RAW_CMPLWI(r_A, K)); else { PPC_LI32(r_scratch1, K); - PPC_CMPLW(r_A, r_scratch1); + EMIT(PPC_RAW_CMPLW(r_A, r_scratch1)); } break; case BPF_JMP | BPF_JSET | BPF_K: if (K < 32768) /* PPC_ANDI is /only/ dot-form */ - PPC_ANDI(r_scratch1, r_A, K); + EMIT(PPC_RAW_ANDI(r_scratch1, r_A, K)); else { PPC_LI32(r_scratch1, K); - PPC_AND_DOT(r_scratch1, r_A, - r_scratch1); + EMIT(PPC_RAW_AND_DOT(r_scratch1, r_A, + r_scratch1)); } break; } diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index f721fbe6ca4d..022103c6a201 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -95,12 +95,12 @@ static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx) * invoked through a tail call. 
*/ if (ctx->seen & SEEN_TAILCALL) { - PPC_LI(b2p[TMP_REG_1], 0); + EMIT(PPC_RAW_LI(b2p[TMP_REG_1], 0)); /* this goes in the redzone */ PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8)); } else { - PPC_NOP(); - PPC_NOP(); + EMIT(PPC_RAW_NOP()); + EMIT(PPC_RAW_NOP()); } #define BPF_TAILCALL_PROLOGUE_SIZE 8 @@ -129,8 +129,8 @@ static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx) /* Setup frame pointer to point to the bpf stack area */ if (bpf_is_seen_register(ctx, BPF_REG_FP)) - PPC_ADDI(b2p[BPF_REG_FP], 1, - STACK_FRAME_MIN_SIZE + ctx->stack_size); + EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], 1, + STACK_FRAME_MIN_SIZE + ctx->stack_size)); } static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx) @@ -144,10 +144,10 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx /* Tear down our stack frame */ if (bpf_has_stack_frame(ctx)) { - PPC_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size); + EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size)); if (ctx->seen & SEEN_FUNC) { PPC_BPF_LL(0, 1, PPC_LR_STKOFF); - PPC_MTLR(0); + EMIT(PPC_RAW_MTLR(0)); } } } @@ -157,9 +157,9 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) bpf_jit_emit_common_epilogue(image, ctx); /* Move result to r3 */ - PPC_MR(3, b2p[BPF_REG_0]); + EMIT(PPC_RAW_MR(3, b2p[BPF_REG_0])); - PPC_BLR(); + EMIT(PPC_RAW_BLR()); } static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, @@ -171,7 +171,7 @@ static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, /* Load actual entry point from function descriptor */ PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0); /* ... and move it to LR */ - PPC_MTLR(b2p[TMP_REG_1]); + EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1])); /* * Load TOC from function descriptor at offset 8. * We can clobber r2 since we get called through a @@ -182,9 +182,9 @@ static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, #else /* We can clobber r12 */ PPC_FUNC_ADDR(12, func); - PPC_MTLR(12); + EMIT(PPC_RAW_MTLR(12)); #endif - PPC_BLRL(); + EMIT(PPC_RAW_BLRL()); } static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, @@ -206,7 +206,7 @@ static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, * that PPC_LI64() can emit. 
*/ for (i = ctx->idx - ctx_idx; i < 5; i++) - PPC_NOP(); + EMIT(PPC_RAW_NOP()); #ifdef PPC64_ELF_ABI_v1 /* @@ -220,8 +220,8 @@ static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, PPC_BPF_LL(12, 12, 0); #endif - PPC_MTLR(12); - PPC_BLRL(); + EMIT(PPC_RAW_MTLR(12)); + EMIT(PPC_RAW_BLRL()); } static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out) @@ -240,8 +240,8 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 * goto out; */ EMIT(PPC_RAW_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries))); - PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31); - PPC_CMPLW(b2p_index, b2p[TMP_REG_1]); + EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31)); + EMIT(PPC_RAW_CMPLW(b2p_index, b2p[TMP_REG_1])); PPC_BCC(COND_GE, out); /* @@ -249,17 +249,17 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 * goto out; */ PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx)); - PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT); + EMIT(PPC_RAW_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT)); PPC_BCC(COND_GT, out); /* * tail_call_cnt++; */ - PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1); + EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1)); PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx)); /* prog = array->ptrs[index]; */ - PPC_MULI(b2p[TMP_REG_1], b2p_index, 8); + EMIT(PPC_RAW_MULI(b2p[TMP_REG_1], b2p_index, 8)); EMIT(PPC_RAW_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array)); PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs)); @@ -267,24 +267,24 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 * if (prog == NULL) * goto out; */ - PPC_CMPLDI(b2p[TMP_REG_1], 0); + EMIT(PPC_RAW_CMPLDI(b2p[TMP_REG_1], 0)); PPC_BCC(COND_EQ, out); /* goto *(prog->bpf_func + prologue_size); */ PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func)); #ifdef PPC64_ELF_ABI_v1 /* skip past the function descriptor */ - PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], - FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE); + EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], + FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE)); #else - PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE); + EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE)); #endif - PPC_MTCTR(b2p[TMP_REG_1]); + EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1])); /* tear down stack, restore NVRs, ... 
*/ bpf_jit_emit_common_epilogue(image, ctx); - PPC_BCTR(); + EMIT(PPC_RAW_BCTR()); /* out: */ } @@ -344,7 +344,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, goto bpf_alu32_trunc; case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */ case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */ - PPC_SUB(dst_reg, dst_reg, src_reg); + EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg)); goto bpf_alu32_trunc; case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */ case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */ @@ -354,7 +354,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, imm = -imm; if (imm) { if (imm >= -32768 && imm < 32768) - PPC_ADDI(dst_reg, dst_reg, IMM_L(imm)); + EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm))); else { PPC_LI32(b2p[TMP_REG_1], imm); EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1])); @@ -364,43 +364,43 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */ case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */ if (BPF_CLASS(code) == BPF_ALU) - PPC_MULW(dst_reg, dst_reg, src_reg); + EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg)); else - PPC_MULD(dst_reg, dst_reg, src_reg); + EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg)); goto bpf_alu32_trunc; case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */ case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */ if (imm >= -32768 && imm < 32768) - PPC_MULI(dst_reg, dst_reg, IMM_L(imm)); + EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm))); else { PPC_LI32(b2p[TMP_REG_1], imm); if (BPF_CLASS(code) == BPF_ALU) - PPC_MULW(dst_reg, dst_reg, - b2p[TMP_REG_1]); + EMIT(PPC_RAW_MULW(dst_reg, dst_reg, + b2p[TMP_REG_1])); else - PPC_MULD(dst_reg, dst_reg, - b2p[TMP_REG_1]); + EMIT(PPC_RAW_MULD(dst_reg, dst_reg, + b2p[TMP_REG_1])); } goto bpf_alu32_trunc; case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */ case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */ if (BPF_OP(code) == BPF_MOD) { - PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg); - PPC_MULW(b2p[TMP_REG_1], src_reg, - b2p[TMP_REG_1]); - PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]); + EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg)); + EMIT(PPC_RAW_MULW(b2p[TMP_REG_1], src_reg, + b2p[TMP_REG_1])); + EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1])); } else - PPC_DIVWU(dst_reg, dst_reg, src_reg); + EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg)); goto bpf_alu32_trunc; case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */ case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */ if (BPF_OP(code) == BPF_MOD) { - PPC_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg); - PPC_MULD(b2p[TMP_REG_1], src_reg, - b2p[TMP_REG_1]); - PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]); + EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg)); + EMIT(PPC_RAW_MULD(b2p[TMP_REG_1], src_reg, + b2p[TMP_REG_1])); + EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1])); } else - PPC_DIVDU(dst_reg, dst_reg, src_reg); + EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg)); break; case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */ case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */ @@ -415,35 +415,37 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, switch (BPF_CLASS(code)) { case BPF_ALU: if (BPF_OP(code) == BPF_MOD) { - PPC_DIVWU(b2p[TMP_REG_2], dst_reg, - b2p[TMP_REG_1]); - PPC_MULW(b2p[TMP_REG_1], + EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_2], + dst_reg, + b2p[TMP_REG_1])); + EMIT(PPC_RAW_MULW(b2p[TMP_REG_1], b2p[TMP_REG_1], - b2p[TMP_REG_2]); - PPC_SUB(dst_reg, dst_reg, - 
b2p[TMP_REG_1]); + b2p[TMP_REG_2])); + EMIT(PPC_RAW_SUB(dst_reg, dst_reg, + b2p[TMP_REG_1])); } else - PPC_DIVWU(dst_reg, dst_reg, - b2p[TMP_REG_1]); + EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, + b2p[TMP_REG_1])); break; case BPF_ALU64: if (BPF_OP(code) == BPF_MOD) { - PPC_DIVDU(b2p[TMP_REG_2], dst_reg, - b2p[TMP_REG_1]); - PPC_MULD(b2p[TMP_REG_1], + EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_2], + dst_reg, + b2p[TMP_REG_1])); + EMIT(PPC_RAW_MULD(b2p[TMP_REG_1], b2p[TMP_REG_1], - b2p[TMP_REG_2]); - PPC_SUB(dst_reg, dst_reg, - b2p[TMP_REG_1]); + b2p[TMP_REG_2])); + EMIT(PPC_RAW_SUB(dst_reg, dst_reg, + b2p[TMP_REG_1])); } else - PPC_DIVDU(dst_reg, dst_reg, - b2p[TMP_REG_1]); + EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, + b2p[TMP_REG_1])); break; } goto bpf_alu32_trunc; case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */ case BPF_ALU64 | BPF_NEG: /* dst = -dst */ - PPC_NEG(dst_reg, dst_reg); + EMIT(PPC_RAW_NEG(dst_reg, dst_reg)); goto bpf_alu32_trunc; /* @@ -451,101 +453,101 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, */ case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */ case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */ - PPC_AND(dst_reg, dst_reg, src_reg); + EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg)); goto bpf_alu32_trunc; case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */ case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */ if (!IMM_H(imm)) - PPC_ANDI(dst_reg, dst_reg, IMM_L(imm)); + EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm))); else { /* Sign-extended */ PPC_LI32(b2p[TMP_REG_1], imm); - PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]); + EMIT(PPC_RAW_AND(dst_reg, dst_reg, b2p[TMP_REG_1])); } goto bpf_alu32_trunc; case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */ case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */ - PPC_OR(dst_reg, dst_reg, src_reg); + EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg)); goto bpf_alu32_trunc; case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */ case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */ if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) { /* Sign-extended */ PPC_LI32(b2p[TMP_REG_1], imm); - PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]); + EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_1])); } else { if (IMM_L(imm)) - PPC_ORI(dst_reg, dst_reg, IMM_L(imm)); + EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm))); if (IMM_H(imm)) - PPC_ORIS(dst_reg, dst_reg, IMM_H(imm)); + EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm))); } goto bpf_alu32_trunc; case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */ case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */ - PPC_XOR(dst_reg, dst_reg, src_reg); + EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg)); goto bpf_alu32_trunc; case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */ case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */ if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) { /* Sign-extended */ PPC_LI32(b2p[TMP_REG_1], imm); - PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]); + EMIT(PPC_RAW_XOR(dst_reg, dst_reg, b2p[TMP_REG_1])); } else { if (IMM_L(imm)) - PPC_XORI(dst_reg, dst_reg, IMM_L(imm)); + EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm))); if (IMM_H(imm)) - PPC_XORIS(dst_reg, dst_reg, IMM_H(imm)); + EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm))); } goto bpf_alu32_trunc; case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */ /* slw clears top 32 bits */ - PPC_SLW(dst_reg, dst_reg, src_reg); + EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg)); /* skip zero extension move, but set address map. 
*/ if (insn_is_zext(&insn[i + 1])) addrs[++i] = ctx->idx * 4; break; case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */ - PPC_SLD(dst_reg, dst_reg, src_reg); + EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg)); break; case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<== (u32) imm */ /* with imm 0, we still need to clear top 32 bits */ - PPC_SLWI(dst_reg, dst_reg, imm); + EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm)); if (insn_is_zext(&insn[i + 1])) addrs[++i] = ctx->idx * 4; break; case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<== imm */ if (imm != 0) - PPC_SLDI(dst_reg, dst_reg, imm); + EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm)); break; case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */ - PPC_SRW(dst_reg, dst_reg, src_reg); + EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg)); if (insn_is_zext(&insn[i + 1])) addrs[++i] = ctx->idx * 4; break; case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */ - PPC_SRD(dst_reg, dst_reg, src_reg); + EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg)); break; case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */ - PPC_SRWI(dst_reg, dst_reg, imm); + EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm)); if (insn_is_zext(&insn[i + 1])) addrs[++i] = ctx->idx * 4; break; case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */ if (imm != 0) - PPC_SRDI(dst_reg, dst_reg, imm); + EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm)); break; case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */ - PPC_SRAW(dst_reg, dst_reg, src_reg); + EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg)); goto bpf_alu32_trunc; case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */ - PPC_SRAD(dst_reg, dst_reg, src_reg); + EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg)); break; case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */ - PPC_SRAWI(dst_reg, dst_reg, imm); + EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm)); goto bpf_alu32_trunc; case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */ if (imm != 0) - PPC_SRADI(dst_reg, dst_reg, imm); + EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm)); break; /* @@ -555,10 +557,10 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */ if (imm == 1) { /* special mov32 for zext */ - PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31); + EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31)); break; } - PPC_MR(dst_reg, src_reg); + EMIT(PPC_RAW_MR(dst_reg, src_reg)); goto bpf_alu32_trunc; case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */ case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */ @@ -572,7 +574,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, bpf_alu32_trunc: /* Truncate to 32-bits */ if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext) - PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31); + EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31)); break; /* @@ -590,11 +592,11 @@ bpf_alu32_trunc: switch (imm) { case 16: /* Rotate 8 bits left & mask with 0x0000ff00 */ - PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23); + EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23)); /* Rotate 8 bits right & insert LSB to reg */ - PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31); + EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31)); /* Move result back to dst_reg */ - PPC_MR(dst_reg, b2p[TMP_REG_1]); + EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1])); break; case 32: /* @@ -602,12 +604,12 @@ bpf_alu32_trunc: * 2 bytes are already in their final position * -- byte 2 and 4 (of bytes 1, 2, 3 and 4) */ - PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31); + EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31)); /* Rotate 24 bits and insert byte 1 
*/ - PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7); + EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7)); /* Rotate 24 bits and insert byte 3 */ - PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23); - PPC_MR(dst_reg, b2p[TMP_REG_1]); + EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23)); + EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1])); break; case 64: /* @@ -619,8 +621,8 @@ bpf_alu32_trunc: * same across all passes */ PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx)); - PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)); - PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]); + EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx))); + EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1])); break; } break; @@ -629,14 +631,14 @@ emit_clear: switch (imm) { case 16: /* zero-extend 16 bits into 64 bits */ - PPC_RLDICL(dst_reg, dst_reg, 0, 48); + EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48)); if (insn_is_zext(&insn[i + 1])) addrs[++i] = ctx->idx * 4; break; case 32: if (!fp->aux->verifier_zext) /* zero-extend 32 bits into 64 bits */ - PPC_RLDICL(dst_reg, dst_reg, 0, 32); + EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32)); break; case 64: /* nop */ @@ -650,18 +652,18 @@ emit_clear: case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */ case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */ if (BPF_CLASS(code) == BPF_ST) { - PPC_LI(b2p[TMP_REG_1], imm); + EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm)); src_reg = b2p[TMP_REG_1]; } - PPC_STB(src_reg, dst_reg, off); + EMIT(PPC_RAW_STB(src_reg, dst_reg, off)); break; case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */ case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */ if (BPF_CLASS(code) == BPF_ST) { - PPC_LI(b2p[TMP_REG_1], imm); + EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm)); src_reg = b2p[TMP_REG_1]; } - PPC_STH(src_reg, dst_reg, off); + EMIT(PPC_RAW_STH(src_reg, dst_reg, off)); break; case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */ case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */ @@ -669,7 +671,7 @@ emit_clear: PPC_LI32(b2p[TMP_REG_1], imm); src_reg = b2p[TMP_REG_1]; } - PPC_STW(src_reg, dst_reg, off); + EMIT(PPC_RAW_STW(src_reg, dst_reg, off)); break; case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */ case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */ @@ -686,20 +688,20 @@ emit_clear: /* *(u32 *)(dst + off) += src */ case BPF_STX | BPF_XADD | BPF_W: /* Get EA into TMP_REG_1 */ - PPC_ADDI(b2p[TMP_REG_1], dst_reg, off); + EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off)); tmp_idx = ctx->idx * 4; /* load value from memory into TMP_REG_2 */ EMIT(PPC_RAW_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0)); /* add value from src_reg into this */ EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg)); /* store result back */ - PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); + EMIT(PPC_RAW_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1])); /* we're done if this succeeded */ PPC_BCC_SHORT(COND_NE, tmp_idx); break; /* *(u64 *)(dst + off) += src */ case BPF_STX | BPF_XADD | BPF_DW: - PPC_ADDI(b2p[TMP_REG_1], dst_reg, off); + EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off)); tmp_idx = ctx->idx * 4; EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0)); EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg)); @@ -712,13 +714,13 @@ emit_clear: */ /* dst = *(u8 *)(ul) (src + off) */ case BPF_LDX | BPF_MEM | BPF_B: - PPC_LBZ(dst_reg, src_reg, off); + EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off)); if (insn_is_zext(&insn[i + 1])) addrs[++i] = ctx->idx * 4; break; /* dst = *(u16 *)(ul) (src + off) */ case 
BPF_LDX | BPF_MEM | BPF_H: - PPC_LHZ(dst_reg, src_reg, off); + EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off)); if (insn_is_zext(&insn[i + 1])) addrs[++i] = ctx->idx * 4; break; @@ -775,7 +777,7 @@ emit_clear: else bpf_jit_emit_func_call_rel(image, ctx, func_addr); /* move return value from r3 to BPF_REG_0 */ - PPC_MR(b2p[BPF_REG_0], 3); + EMIT(PPC_RAW_MR(b2p[BPF_REG_0], 3)); break; /* @@ -860,9 +862,9 @@ cond_branch: case BPF_JMP32 | BPF_JNE | BPF_X: /* unsigned comparison */ if (BPF_CLASS(code) == BPF_JMP32) - PPC_CMPLW(dst_reg, src_reg); + EMIT(PPC_RAW_CMPLW(dst_reg, src_reg)); else - PPC_CMPLD(dst_reg, src_reg); + EMIT(PPC_RAW_CMPLD(dst_reg, src_reg)); break; case BPF_JMP | BPF_JSGT | BPF_X: case BPF_JMP | BPF_JSLT | BPF_X: @@ -874,21 +876,21 @@ cond_branch: case BPF_JMP32 | BPF_JSLE | BPF_X: /* signed comparison */ if (BPF_CLASS(code) == BPF_JMP32) - PPC_CMPW(dst_reg, src_reg); + EMIT(PPC_RAW_CMPW(dst_reg, src_reg)); else - PPC_CMPD(dst_reg, src_reg); + EMIT(PPC_RAW_CMPD(dst_reg, src_reg)); break; case BPF_JMP | BPF_JSET | BPF_X: case BPF_JMP32 | BPF_JSET | BPF_X: if (BPF_CLASS(code) == BPF_JMP) { - PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, - src_reg); + EMIT(PPC_RAW_AND_DOT(b2p[TMP_REG_1], dst_reg, + src_reg)); } else { int tmp_reg = b2p[TMP_REG_1]; - PPC_AND(tmp_reg, dst_reg, src_reg); - PPC_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0, - 31); + EMIT(PPC_RAW_AND(tmp_reg, dst_reg, src_reg)); + EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0, + 31)); } break; case BPF_JMP | BPF_JNE | BPF_K: @@ -912,19 +914,19 @@ cond_branch: */ if (imm >= 0 && imm < 32768) { if (is_jmp32) - PPC_CMPLWI(dst_reg, imm); + EMIT(PPC_RAW_CMPLWI(dst_reg, imm)); else - PPC_CMPLDI(dst_reg, imm); + EMIT(PPC_RAW_CMPLDI(dst_reg, imm)); } else { /* sign-extending load */ PPC_LI32(b2p[TMP_REG_1], imm); /* ... but unsigned comparison */ if (is_jmp32) - PPC_CMPLW(dst_reg, - b2p[TMP_REG_1]); + EMIT(PPC_RAW_CMPLW(dst_reg, + b2p[TMP_REG_1])); else - PPC_CMPLD(dst_reg, - b2p[TMP_REG_1]); + EMIT(PPC_RAW_CMPLD(dst_reg, + b2p[TMP_REG_1])); } break; } @@ -945,17 +947,17 @@ cond_branch: */ if (imm >= -32768 && imm < 32768) { if (is_jmp32) - PPC_CMPWI(dst_reg, imm); + EMIT(PPC_RAW_CMPWI(dst_reg, imm)); else - PPC_CMPDI(dst_reg, imm); + EMIT(PPC_RAW_CMPDI(dst_reg, imm)); } else { PPC_LI32(b2p[TMP_REG_1], imm); if (is_jmp32) - PPC_CMPW(dst_reg, - b2p[TMP_REG_1]); + EMIT(PPC_RAW_CMPW(dst_reg, + b2p[TMP_REG_1])); else - PPC_CMPD(dst_reg, - b2p[TMP_REG_1]); + EMIT(PPC_RAW_CMPD(dst_reg, + b2p[TMP_REG_1])); } break; } @@ -964,19 +966,19 @@ cond_branch: /* andi does not sign-extend the immediate */ if (imm >= 0 && imm < 32768) /* PPC_ANDI is _only/always_ dot-form */ - PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm); + EMIT(PPC_RAW_ANDI(b2p[TMP_REG_1], dst_reg, imm)); else { int tmp_reg = b2p[TMP_REG_1]; PPC_LI32(tmp_reg, imm); if (BPF_CLASS(code) == BPF_JMP) { - PPC_AND_DOT(tmp_reg, dst_reg, - tmp_reg); + EMIT(PPC_RAW_AND_DOT(tmp_reg, dst_reg, + tmp_reg)); } else { - PPC_AND(tmp_reg, dst_reg, - tmp_reg); - PPC_RLWINM_DOT(tmp_reg, tmp_reg, - 0, 0, 31); + EMIT(PPC_RAW_AND(tmp_reg, dst_reg, + tmp_reg)); + EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg, + 0, 0, 31)); } } break; -- cgit v1.2.3 From 357c572948310c88868cee00e64ca3f7fc933a74 Mon Sep 17 00:00:00 2001 From: Balamuruhan S Date: Wed, 24 Jun 2020 17:00:37 +0530 Subject: powerpc/ppc-opcode: Reuse raw instruction macros to stringify Wrap existing stringify macros to reuse raw instruction encoding macros that are newly added. Signed-off-by: Balamuruhan S Tested-by: Naveen N. Rao Acked-by: Naveen N. 
Rao [mpe: Add DCBFPS, DCBSTPS, PHWSYNC, PLWSYNC] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200624113038.908074-6-bala24@linux.ibm.com --- arch/powerpc/include/asm/ppc-opcode.h | 233 +++++++++++----------------------- 1 file changed, 75 insertions(+), 158 deletions(-) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index cb24b758c784..88cf292b3324 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -628,175 +628,96 @@ /* Deal with instructions that older assemblers aren't aware of */ #define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH) -#define PPC_CP_ABORT stringify_in_c(.long PPC_INST_CP_ABORT) -#define PPC_COPY(a, b) stringify_in_c(.long PPC_INST_COPY | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_DARN(t, l) stringify_in_c(.long PPC_INST_DARN | \ - ___PPC_RT(t) | \ - (((l) & 0x3) << 16)) -#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \ - __PPC_RA(a) | __PPC_RB(b)) -#define PPC_DCBZL(a, b) stringify_in_c(.long PPC_INST_DCBZL | \ - __PPC_RA(a) | __PPC_RB(b)) -#define PPC_LQARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LQARX | \ - ___PPC_RT(t) | ___PPC_RA(a) | \ - ___PPC_RB(b) | __PPC_EH(eh)) -#define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \ - ___PPC_RT(t) | ___PPC_RA(a) | \ - ___PPC_RB(b) | __PPC_EH(eh)) -#define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \ - ___PPC_RT(t) | ___PPC_RA(a) | \ - ___PPC_RB(b) | __PPC_EH(eh)) -#define PPC_STQCX(t, a, b) stringify_in_c(.long PPC_INST_STQCX | \ - ___PPC_RT(t) | ___PPC_RA(a) | \ - ___PPC_RB(b)) -#define PPC_MADDHD(t, a, b, c) stringify_in_c(.long PPC_INST_MADDHD | \ - ___PPC_RT(t) | ___PPC_RA(a) | \ - ___PPC_RB(b) | ___PPC_RC(c)) -#define PPC_MADDHDU(t, a, b, c) stringify_in_c(.long PPC_INST_MADDHDU | \ - ___PPC_RT(t) | ___PPC_RA(a) | \ - ___PPC_RB(b) | ___PPC_RC(c)) -#define PPC_MADDLD(t, a, b, c) stringify_in_c(.long PPC_INST_MADDLD | \ - ___PPC_RT(t) | ___PPC_RA(a) | \ - ___PPC_RB(b) | ___PPC_RC(c)) -#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ - ___PPC_RB(b)) -#define PPC_MSGSYNC stringify_in_c(.long PPC_INST_MSGSYNC) -#define PPC_MSGCLR(b) stringify_in_c(.long PPC_INST_MSGCLR | \ - ___PPC_RB(b)) -#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \ - ___PPC_RB(b)) -#define PPC_MSGCLRP(b) stringify_in_c(.long PPC_INST_MSGCLRP | \ - ___PPC_RB(b)) -#define PPC_PASTE(a, b) stringify_in_c(.long PPC_INST_PASTE | \ - ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \ - __PPC_RA(a) | __PPC_RS(s)) -#define PPC_POPCNTD(a, s) stringify_in_c(.long PPC_INST_POPCNTD | \ - __PPC_RA(a) | __PPC_RS(s)) -#define PPC_POPCNTW(a, s) stringify_in_c(.long PPC_INST_POPCNTW | \ - __PPC_RA(a) | __PPC_RS(s)) -#define PPC_RFCI stringify_in_c(.long PPC_INST_RFCI) -#define PPC_RFDI stringify_in_c(.long PPC_INST_RFDI) -#define PPC_RFMCI stringify_in_c(.long PPC_INST_RFMCI) -#define PPC_TLBILX(t, a, b) stringify_in_c(.long PPC_INST_TLBILX | \ - __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b)) +#define PPC_CP_ABORT stringify_in_c(.long PPC_RAW_CP_ABORT) +#define PPC_COPY(a, b) stringify_in_c(.long PPC_RAW_COPY(a, b)) +#define PPC_DARN(t, l) stringify_in_c(.long PPC_RAW_DARN(t, l)) +#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_RAW_DCBAL(a, b)) +#define PPC_DCBZL(a, b) stringify_in_c(.long PPC_RAW_DCBZL(a, b)) +#define PPC_LQARX(t, a, b, eh) stringify_in_c(.long PPC_RAW_LQARX(t, a, b, eh)) +#define PPC_LDARX(t, a, b, eh) 
stringify_in_c(.long PPC_RAW_LDARX(t, a, b, eh)) +#define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_RAW_LWARX(t, a, b, eh)) +#define PPC_STQCX(t, a, b) stringify_in_c(.long PPC_RAW_STQCX(t, a, b)) +#define PPC_MADDHD(t, a, b, c) stringify_in_c(.long PPC_RAW_MADDHD(t, a, b, c)) +#define PPC_MADDHDU(t, a, b, c) stringify_in_c(.long PPC_RAW_MADDHDU(t, a, b, c)) +#define PPC_MADDLD(t, a, b, c) stringify_in_c(.long PPC_RAW_MADDLD(t, a, b, c)) +#define PPC_MSGSND(b) stringify_in_c(.long PPC_RAW_MSGSND(b)) +#define PPC_MSGSYNC stringify_in_c(.long PPC_RAW_MSGSYNC) +#define PPC_MSGCLR(b) stringify_in_c(.long PPC_RAW_MSGCLR(b)) +#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_RAW_MSGSNDP(b)) +#define PPC_MSGCLRP(b) stringify_in_c(.long PPC_RAW_MSGCLRP(b)) +#define PPC_PASTE(a, b) stringify_in_c(.long PPC_RAW_PASTE(a, b)) +#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_RAW_POPCNTB(a, s)) +#define PPC_POPCNTD(a, s) stringify_in_c(.long PPC_RAW_POPCNTD(a, s)) +#define PPC_POPCNTW(a, s) stringify_in_c(.long PPC_RAW_POPCNTW(a, s)) +#define PPC_RFCI stringify_in_c(.long PPC_RAW_RFCI) +#define PPC_RFDI stringify_in_c(.long PPC_RAW_RFDI) +#define PPC_RFMCI stringify_in_c(.long PPC_RAW_RFMCI) +#define PPC_TLBILX(t, a, b) stringify_in_c(.long PPC_RAW_TLBILX(t, a, b)) #define PPC_TLBILX_ALL(a, b) PPC_TLBILX(0, a, b) #define PPC_TLBILX_PID(a, b) PPC_TLBILX(1, a, b) #define PPC_TLBILX_VA(a, b) PPC_TLBILX(3, a, b) -#define PPC_WAIT(w) stringify_in_c(.long PPC_INST_WAIT | \ - __PPC_WC(w)) -#define PPC_TLBIE(lp,a) stringify_in_c(.long PPC_INST_TLBIE | \ - ___PPC_RB(a) | ___PPC_RS(lp)) -#define PPC_TLBIE_5(rb,rs,ric,prs,r) \ - stringify_in_c(.long PPC_INST_TLBIE | \ - ___PPC_RB(rb) | ___PPC_RS(rs) | \ - ___PPC_RIC(ric) | ___PPC_PRS(prs) | \ - ___PPC_R(r)) +#define PPC_WAIT(w) stringify_in_c(.long PPC_RAW_WAIT(w)) +#define PPC_TLBIE(lp, a) stringify_in_c(.long PPC_RAW_TLBIE(lp, a)) +#define PPC_TLBIE_5(rb, rs, ric, prs, r) \ + stringify_in_c(.long PPC_RAW_TLBIE_5(rb, rs, ric, prs, r)) #define PPC_TLBIEL(rb,rs,ric,prs,r) \ - stringify_in_c(.long PPC_INST_TLBIEL | \ - ___PPC_RB(rb) | ___PPC_RS(rs) | \ - ___PPC_RIC(ric) | ___PPC_PRS(prs) | \ - ___PPC_R(r)) -#define PPC_TLBSRX_DOT(a,b) stringify_in_c(.long PPC_INST_TLBSRX_DOT | \ - __PPC_RA0(a) | __PPC_RB(b)) -#define PPC_TLBIVAX(a,b) stringify_in_c(.long PPC_INST_TLBIVAX | \ - __PPC_RA0(a) | __PPC_RB(b)) - -#define PPC_ERATWE(s, a, w) stringify_in_c(.long PPC_INST_ERATWE | \ - __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w)) -#define PPC_ERATRE(s, a, w) stringify_in_c(.long PPC_INST_ERATRE | \ - __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w)) -#define PPC_ERATILX(t, a, b) stringify_in_c(.long PPC_INST_ERATILX | \ - __PPC_T_TLB(t) | __PPC_RA0(a) | \ - __PPC_RB(b)) -#define PPC_ERATIVAX(s, a, b) stringify_in_c(.long PPC_INST_ERATIVAX | \ - __PPC_RS(s) | __PPC_RA0(a) | __PPC_RB(b)) -#define PPC_ERATSX(t, a, w) stringify_in_c(.long PPC_INST_ERATSX | \ - __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b)) -#define PPC_ERATSX_DOT(t, a, w) stringify_in_c(.long PPC_INST_ERATSX_DOT | \ - __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b)) -#define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \ - __PPC_RT(t) | __PPC_RB(b)) -#define __PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \ - ___PPC_RT(t) | ___PPC_RB(b)) -#define PPC_ICBT(c,a,b) stringify_in_c(.long PPC_INST_ICBT | \ - __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b)) + stringify_in_c(.long PPC_RAW_TLBIEL(rb, rs, ric, prs, r)) +#define PPC_TLBSRX_DOT(a, b) stringify_in_c(.long PPC_RAW_TLBSRX_DOT(a, b)) +#define PPC_TLBIVAX(a, 
b) stringify_in_c(.long PPC_RAW_TLBIVAX(a, b)) + +#define PPC_ERATWE(s, a, w) stringify_in_c(.long PPC_RAW_ERATWE(s, a, w)) +#define PPC_ERATRE(s, a, w) stringify_in_c(.long PPC_RAW_ERATRE(a, a, w)) +#define PPC_ERATILX(t, a, b) stringify_in_c(.long PPC_RAW_ERATILX(t, a, b)) +#define PPC_ERATIVAX(s, a, b) stringify_in_c(.long PPC_RAW_ERATIVAX(s, a, b)) +#define PPC_ERATSX(t, a, w) stringify_in_c(.long PPC_RAW_ERATSX(t, a, w)) +#define PPC_ERATSX_DOT(t, a, w) stringify_in_c(.long PPC_RAW_ERATSX_DOT(t, a, w)) +#define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_RAW_SLBFEE_DOT(t, b)) +#define __PPC_SLBFEE_DOT(t, b) stringify_in_c(.long __PPC_RAW_SLBFEE_DOT(t, b)) +#define PPC_ICBT(c, a, b) stringify_in_c(.long PPC_RAW_ICBT(c, a, b)) /* PASemi instructions */ -#define LBZCIX(t,a,b) stringify_in_c(.long PPC_INST_LBZCIX | \ - __PPC_RT(t) | __PPC_RA(a) | __PPC_RB(b)) -#define STBCIX(s,a,b) stringify_in_c(.long PPC_INST_STBCIX | \ - __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b)) - -#define PPC_DCBFPS(a, b) stringify_in_c(.long PPC_INST_DCBF | \ - ___PPC_RA(a) | ___PPC_RB(b) | (4 << 21)) -#define PPC_DCBSTPS(a, b) stringify_in_c(.long PPC_INST_DCBF | \ - ___PPC_RA(a) | ___PPC_RB(b) | (6 << 21)) - -#define PPC_PHWSYNC stringify_in_c(.long PPC_INST_PHWSYNC) -#define PPC_PLWSYNC stringify_in_c(.long PPC_INST_PLWSYNC) - -#define STXVD2X(s, a, b) stringify_in_c(.long PPC_INST_STXVD2X | \ - VSX_XX1((s), a, b)) -#define LXVD2X(s, a, b) stringify_in_c(.long PPC_INST_LXVD2X | \ - VSX_XX1((s), a, b)) -#define MFVRD(a, t) stringify_in_c(.long PPC_INST_MFVSRD | \ - VSX_XX1((t)+32, a, R0)) -#define MTVRD(t, a) stringify_in_c(.long PPC_INST_MTVSRD | \ - VSX_XX1((t)+32, a, R0)) -#define VPMSUMW(t, a, b) stringify_in_c(.long PPC_INST_VPMSUMW | \ - VSX_XX3((t), a, b)) -#define VPMSUMD(t, a, b) stringify_in_c(.long PPC_INST_VPMSUMD | \ - VSX_XX3((t), a, b)) -#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \ - VSX_XX3((t), a, b)) -#define XXSWAPD(t, a) stringify_in_c(.long PPC_INST_XXSWAPD | \ - VSX_XX3((t), a, a)) -#define XVCPSGNDP(t, a, b) stringify_in_c(.long (PPC_INST_XVCPSGNDP | \ - VSX_XX3((t), (a), (b)))) +#define LBZCIX(t, a, b) stringify_in_c(.long PPC_RAW_LBZCIX(t, a, b)) +#define STBCIX(s, a, b) stringify_in_c(.long PPC_RAW_STBCIX(s, a, b)) +#define PPC_DCBFPS(a, b) stringify_in_c(.long PPC_RAW_DCBFPS(a, b)) +#define PPC_DCBSTPS(a, b) stringify_in_c(.long PPC_RAW_DCBSTPS(a, b)) +#define PPC_PHWSYNC stringify_in_c(.long PPC_RAW_PHWSYNC) +#define PPC_PLWSYNC stringify_in_c(.long PPC_RAW_PLWSYNC) +#define STXVD2X(s, a, b) stringify_in_c(.long PPC_RAW_STXVD2X(s, a, b)) +#define LXVD2X(s, a, b) stringify_in_c(.long PPC_RAW_LXVD2X(s, a, b)) +#define MFVRD(a, t) stringify_in_c(.long PPC_RAW_MFVRD(a, t)) +#define MTVRD(t, a) stringify_in_c(.long PPC_RAW_MTVRD(t, a)) +#define VPMSUMW(t, a, b) stringify_in_c(.long PPC_RAW_VPMSUMW(t, a, b)) +#define VPMSUMD(t, a, b) stringify_in_c(.long PPC_RAW_VPMSUMD(t, a, b)) +#define XXLOR(t, a, b) stringify_in_c(.long PPC_RAW_XXLOR(t, a, b)) +#define XXSWAPD(t, a) stringify_in_c(.long PPC_RAW_XXSWAPD(t, a)) +#define XVCPSGNDP(t, a, b) stringify_in_c(.long (PPC_RAW_XVCPSGNDP(t, a, b))) #define VPERMXOR(vrt, vra, vrb, vrc) \ - stringify_in_c(.long (PPC_INST_VPERMXOR | \ - ___PPC_RT(vrt) | ___PPC_RA(vra) | \ - ___PPC_RB(vrb) | (((vrc) & 0x1f) << 6))) + stringify_in_c(.long (PPC_RAW_VPERMXOR(vrt, vra, vrb, vrc))) -#define PPC_NAP stringify_in_c(.long PPC_INST_NAP) -#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP) -#define PPC_WINKLE stringify_in_c(.long 
PPC_INST_WINKLE) +#define PPC_NAP stringify_in_c(.long PPC_RAW_NAP) +#define PPC_SLEEP stringify_in_c(.long PPC_RAW_SLEEP) +#define PPC_WINKLE stringify_in_c(.long PPC_RAW_WINKLE) -#define PPC_STOP stringify_in_c(.long PPC_INST_STOP) +#define PPC_STOP stringify_in_c(.long PPC_RAW_STOP) /* BHRB instructions */ -#define PPC_CLRBHRB stringify_in_c(.long PPC_INST_CLRBHRB) -#define PPC_MFBHRBE(r, n) stringify_in_c(.long PPC_INST_BHRBE | \ - __PPC_RT(r) | \ - (((n) & 0x3ff) << 11)) +#define PPC_CLRBHRB stringify_in_c(.long PPC_RAW_CLRBHRB) +#define PPC_MFBHRBE(r, n) stringify_in_c(.long PPC_RAW_MFBHRBE(r, n)) /* Transactional memory instructions */ -#define TRECHKPT stringify_in_c(.long PPC_INST_TRECHKPT) -#define TRECLAIM(r) stringify_in_c(.long PPC_INST_TRECLAIM \ - | __PPC_RA(r)) -#define TABORT(r) stringify_in_c(.long PPC_INST_TABORT \ - | __PPC_RA(r)) +#define TRECHKPT stringify_in_c(.long PPC_RAW_TRECHKPT) +#define TRECLAIM(r) stringify_in_c(.long PPC_RAW_TRECLAIM(r)) +#define TABORT(r) stringify_in_c(.long PPC_RAW_TABORT(r)) /* book3e thread control instructions */ -#define MTTMR(tmr, r) stringify_in_c(.long PPC_INST_MTTMR | \ - TMRN(tmr) | ___PPC_RS(r)) -#define MFTMR(tmr, r) stringify_in_c(.long PPC_INST_MFTMR | \ - TMRN(tmr) | ___PPC_RT(r)) +#define MTTMR(tmr, r) stringify_in_c(.long PPC_RAW_MTTMR(tmr, r)) +#define MFTMR(tmr, r) stringify_in_c(.long PPC_RAW_MFTMR(tmr, r)) /* Coprocessor instructions */ -#define PPC_ICSWX(s, a, b) stringify_in_c(.long PPC_INST_ICSWX | \ - ___PPC_RS(s) | \ - ___PPC_RA(a) | \ - ___PPC_RB(b)) -#define PPC_ICSWEPX(s, a, b) stringify_in_c(.long PPC_INST_ICSWEPX | \ - ___PPC_RS(s) | \ - ___PPC_RA(a) | \ - ___PPC_RB(b)) - -#define PPC_SLBIA(IH) stringify_in_c(.long PPC_INST_SLBIA | \ - ((IH & 0x7) << 21)) +#define PPC_ICSWX(s, a, b) stringify_in_c(.long PPC_RAW_ICSWX(s, a, b)) +#define PPC_ICSWEPX(s, a, b) stringify_in_c(.long PPC_RAW_ICSWEPX(s, a, b)) + +#define PPC_SLBIA(IH) stringify_in_c(.long PPC_RAW_SLBIA(IH)) /* * These may only be used on ISA v3.0 or later (aka. CPU_FTR_ARCH_300, radix @@ -808,12 +729,8 @@ #define PPC_RADIX_INVALIDATE_ERAT_USER PPC_SLBIA(3) #define PPC_RADIX_INVALIDATE_ERAT_GUEST PPC_SLBIA(6) -#define VCMPEQUD_RC(vrt, vra, vrb) stringify_in_c(.long PPC_INST_VCMPEQUD | \ - ___PPC_RT(vrt) | ___PPC_RA(vra) | \ - ___PPC_RB(vrb) | __PPC_RC21) +#define VCMPEQUD_RC(vrt, vra, vrb) stringify_in_c(.long PPC_RAW_VCMPEQUD_RC(vrt, vra, vrb)) -#define VCMPEQUB_RC(vrt, vra, vrb) stringify_in_c(.long PPC_INST_VCMPEQUB | \ - ___PPC_RT(vrt) | ___PPC_RA(vra) | \ - ___PPC_RB(vrb) | __PPC_RC21) +#define VCMPEQUB_RC(vrt, vra, vrb) stringify_in_c(.long PPC_RAW_VCMPEQUB_RC(vrt, vra, vrb)) #endif /* _ASM_POWERPC_PPC_OPCODE_H */ -- cgit v1.2.3 From e4208f1399b1bf7ed84ba359a6ba0979d1df4029 Mon Sep 17 00:00:00 2001 From: Balamuruhan S Date: Wed, 24 Jun 2020 17:00:38 +0530 Subject: powerpc/ppc-opcode: Fold PPC_INST_* macros into PPC_RAW_* macros Lots of PPC_INST_* macros are used only ever in PPC_* macros, fold those PPC_INST_* into PPC_RAW_* to avoid using PPC_INST_* accidentally. Signed-off-by: Balamuruhan S Tested-by: Naveen N. Rao Acked-by: Naveen N. 
Rao [mpe: Deal with PHWSYNC, PLWSYNC] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200624113038.908074-7-bala24@linux.ibm.com --- arch/powerpc/include/asm/ppc-opcode.h | 394 +++++++++++----------------------- 1 file changed, 129 insertions(+), 265 deletions(-) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 88cf292b3324..6b7a3f0f2d62 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -209,58 +209,27 @@ /* sorted alphabetically */ #define PPC_INST_BCCTR_FLUSH 0x4c400420 -#define PPC_INST_BHRBE 0x7c00025c -#define PPC_INST_CLRBHRB 0x7c00035c #define PPC_INST_COPY 0x7c20060c -#define PPC_INST_CP_ABORT 0x7c00068c -#define PPC_INST_DARN 0x7c0005e6 #define PPC_INST_DCBA 0x7c0005ec #define PPC_INST_DCBA_MASK 0xfc0007fe -#define PPC_INST_DCBAL 0x7c2005ec -#define PPC_INST_DCBZL 0x7c2007ec -#define PPC_INST_ICBT 0x7c00002c -#define PPC_INST_ICSWX 0x7c00032d -#define PPC_INST_ICSWEPX 0x7c00076d #define PPC_INST_ISEL 0x7c00001e #define PPC_INST_ISEL_MASK 0xfc00003e -#define PPC_INST_LDARX 0x7c0000a8 -#define PPC_INST_STDCX 0x7c0001ad -#define PPC_INST_LQARX 0x7c000228 -#define PPC_INST_STQCX 0x7c00016d #define PPC_INST_LSWI 0x7c0004aa #define PPC_INST_LSWX 0x7c00042a -#define PPC_INST_LWARX 0x7c000028 -#define PPC_INST_STWCX 0x7c00012d #define PPC_INST_LWSYNC 0x7c2004ac #define PPC_INST_SYNC 0x7c0004ac -#define PPC_INST_PHWSYNC 0x7c8004ac -#define PPC_INST_PLWSYNC 0x7ca004ac #define PPC_INST_SYNC_MASK 0xfc0007fe #define PPC_INST_ISYNC 0x4c00012c -#define PPC_INST_LXVD2X 0x7c000698 #define PPC_INST_MCRXR 0x7c000400 #define PPC_INST_MCRXR_MASK 0xfc0007fe #define PPC_INST_MFSPR_PVR 0x7c1f42a6 #define PPC_INST_MFSPR_PVR_MASK 0xfc1ffffe -#define PPC_INST_MFTMR 0x7c0002dc -#define PPC_INST_MSGSND 0x7c00019c -#define PPC_INST_MSGCLR 0x7c0001dc -#define PPC_INST_MSGSYNC 0x7c0006ec -#define PPC_INST_MSGSNDP 0x7c00011c -#define PPC_INST_MSGCLRP 0x7c00015c #define PPC_INST_MTMSRD 0x7c000164 -#define PPC_INST_MTTMR 0x7c0003dc #define PPC_INST_NOP 0x60000000 -#define PPC_INST_PASTE 0x7c20070d #define PPC_INST_POPCNTB 0x7c0000f4 #define PPC_INST_POPCNTB_MASK 0xfc0007fe -#define PPC_INST_POPCNTD 0x7c0003f4 -#define PPC_INST_POPCNTW 0x7c0002f4 #define PPC_INST_RFEBB 0x4c000124 -#define PPC_INST_RFCI 0x4c000066 -#define PPC_INST_RFDI 0x4c00004e #define PPC_INST_RFID 0x4c000024 -#define PPC_INST_RFMCI 0x4c00004c #define PPC_INST_MFSPR 0x7c0002a6 #define PPC_INST_MFSPR_DSCR 0x7c1102a6 #define PPC_INST_MFSPR_DSCR_MASK 0xfc1ffffe @@ -270,134 +239,31 @@ #define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1ffffe #define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6 #define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe -#define PPC_INST_MFVSRD 0x7c000066 -#define PPC_INST_MTVSRD 0x7c000166 #define PPC_INST_SC 0x44000002 -#define PPC_INST_SLBFEE 0x7c0007a7 -#define PPC_INST_SLBIA 0x7c0003e4 - #define PPC_INST_STRING 0x7c00042a #define PPC_INST_STRING_MASK 0xfc0007fe #define PPC_INST_STRING_GEN_MASK 0xfc00067e - #define PPC_INST_STSWI 0x7c0005aa #define PPC_INST_STSWX 0x7c00052a -#define PPC_INST_STXVD2X 0x7c000798 -#define PPC_INST_TLBIE 0x7c000264 -#define PPC_INST_TLBIEL 0x7c000224 -#define PPC_INST_TLBILX 0x7c000024 -#define PPC_INST_WAIT 0x7c00007c -#define PPC_INST_TLBIVAX 0x7c000624 -#define PPC_INST_TLBSRX_DOT 0x7c0006a5 -#define PPC_INST_VPMSUMW 0x10000488 -#define PPC_INST_VPMSUMD 0x100004c8 -#define PPC_INST_VPERMXOR 0x1000002d -#define PPC_INST_XXLOR 0xf0000490 -#define PPC_INST_XXSWAPD 0xf0000250 -#define 
PPC_INST_XVCPSGNDP 0xf0000780 #define PPC_INST_TRECHKPT 0x7c0007dd #define PPC_INST_TRECLAIM 0x7c00075d -#define PPC_INST_TABORT 0x7c00071d #define PPC_INST_TSR 0x7c0005dd - -#define PPC_INST_DCBF 0x7c0000ac - -#define PPC_INST_NAP 0x4c000364 -#define PPC_INST_SLEEP 0x4c0003a4 -#define PPC_INST_WINKLE 0x4c0003e4 - -#define PPC_INST_STOP 0x4c0002e4 - -/* A2 specific instructions */ -#define PPC_INST_ERATWE 0x7c0001a6 -#define PPC_INST_ERATRE 0x7c000166 -#define PPC_INST_ERATILX 0x7c000066 -#define PPC_INST_ERATIVAX 0x7c000666 -#define PPC_INST_ERATSX 0x7c000126 -#define PPC_INST_ERATSX_DOT 0x7c000127 - -/* Misc instructions for BPF compiler */ -#define PPC_INST_LBZ 0x88000000 #define PPC_INST_LD 0xe8000000 -#define PPC_INST_LDX 0x7c00002a -#define PPC_INST_LHZ 0xa0000000 -#define PPC_INST_LWZ 0x80000000 -#define PPC_INST_LHBRX 0x7c00062c -#define PPC_INST_LDBRX 0x7c000428 -#define PPC_INST_STB 0x98000000 -#define PPC_INST_STH 0xb0000000 #define PPC_INST_STD 0xf8000000 -#define PPC_INST_STDX 0x7c00012a -#define PPC_INST_STDU 0xf8000001 -#define PPC_INST_STW 0x90000000 -#define PPC_INST_STWU 0x94000000 #define PPC_INST_MFLR 0x7c0802a6 -#define PPC_INST_MTLR 0x7c0803a6 #define PPC_INST_MTCTR 0x7c0903a6 -#define PPC_INST_CMPWI 0x2c000000 -#define PPC_INST_CMPDI 0x2c200000 -#define PPC_INST_CMPW 0x7c000000 -#define PPC_INST_CMPD 0x7c200000 -#define PPC_INST_CMPLW 0x7c000040 -#define PPC_INST_CMPLD 0x7c200040 -#define PPC_INST_CMPLWI 0x28000000 -#define PPC_INST_CMPLDI 0x28200000 #define PPC_INST_ADDI 0x38000000 #define PPC_INST_ADDIS 0x3c000000 #define PPC_INST_ADD 0x7c000214 -#define PPC_INST_ADDC 0x7c000014 -#define PPC_INST_SUB 0x7c000050 #define PPC_INST_BLR 0x4e800020 -#define PPC_INST_BLRL 0x4e800021 #define PPC_INST_BCTR 0x4e800420 #define PPC_INST_BCTRL 0x4e800421 -#define PPC_INST_MULLD 0x7c0001d2 -#define PPC_INST_MULLW 0x7c0001d6 -#define PPC_INST_MULHWU 0x7c000016 -#define PPC_INST_MULLI 0x1c000000 -#define PPC_INST_MADDHD 0x10000030 -#define PPC_INST_MADDHDU 0x10000031 -#define PPC_INST_MADDLD 0x10000033 -#define PPC_INST_DIVWU 0x7c000396 #define PPC_INST_DIVD 0x7c0003d2 -#define PPC_INST_DIVDU 0x7c000392 -#define PPC_INST_RLWINM 0x54000000 -#define PPC_INST_RLWINM_DOT 0x54000001 -#define PPC_INST_RLWIMI 0x50000000 -#define PPC_INST_RLDICL 0x78000000 #define PPC_INST_RLDICR 0x78000004 -#define PPC_INST_SLW 0x7c000030 -#define PPC_INST_SLD 0x7c000036 -#define PPC_INST_SRW 0x7c000430 -#define PPC_INST_SRAW 0x7c000630 -#define PPC_INST_SRAWI 0x7c000670 -#define PPC_INST_SRD 0x7c000436 -#define PPC_INST_SRAD 0x7c000634 -#define PPC_INST_SRADI 0x7c000674 -#define PPC_INST_AND 0x7c000038 -#define PPC_INST_ANDDOT 0x7c000039 -#define PPC_INST_OR 0x7c000378 -#define PPC_INST_XOR 0x7c000278 -#define PPC_INST_ANDI 0x70000000 #define PPC_INST_ORI 0x60000000 #define PPC_INST_ORIS 0x64000000 -#define PPC_INST_XORI 0x68000000 -#define PPC_INST_XORIS 0x6c000000 -#define PPC_INST_NEG 0x7c0000d0 -#define PPC_INST_EXTSW 0x7c0007b4 #define PPC_INST_BRANCH 0x48000000 #define PPC_INST_BRANCH_COND 0x40800000 -#define PPC_INST_LBZCIX 0x7c0006aa -#define PPC_INST_STBCIX 0x7c0007aa -#define PPC_INST_LWZX 0x7c00002e -#define PPC_INST_LFSX 0x7c00042e -#define PPC_INST_STFSX 0x7c00052e -#define PPC_INST_LFDX 0x7c0004ae -#define PPC_INST_STFDX 0x7c0005ae -#define PPC_INST_LVX 0x7c0000ce -#define PPC_INST_STVX 0x7c0001ce -#define PPC_INST_VCMPEQUD 0x100000c7 -#define PPC_INST_VCMPEQUB 0x10000006 /* macros to insert fields into opcodes */ #define ___PPC_RA(a) (((a) & 0x1f) << 16) @@ -451,168 +317,166 @@ #endif /* 
Base instruction encoding */ -#define PPC_RAW_CP_ABORT (PPC_INST_CP_ABORT) +#define PPC_RAW_CP_ABORT (0x7c00068c) #define PPC_RAW_COPY(a, b) (PPC_INST_COPY | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_DARN(t, l) (PPC_INST_DARN | ___PPC_RT(t) | (((l) & 0x3) << 16)) -#define PPC_RAW_DCBAL(a, b) (PPC_INST_DCBAL | __PPC_RA(a) | __PPC_RB(b)) -#define PPC_RAW_DCBZL(a, b) (PPC_INST_DCBZL | __PPC_RA(a) | __PPC_RB(b)) -#define PPC_RAW_LQARX(t, a, b, eh) (PPC_INST_LQARX | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | __PPC_EH(eh)) -#define PPC_RAW_LDARX(t, a, b, eh) (PPC_INST_LDARX | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | __PPC_EH(eh)) -#define PPC_RAW_LWARX(t, a, b, eh) (PPC_INST_LWARX | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | __PPC_EH(eh)) -#define PPC_RAW_PHWSYNC (PPC_INST_PHWSYNC) -#define PPC_RAW_PLWSYNC (PPC_INST_PLWSYNC) -#define PPC_RAW_STQCX(t, a, b) (PPC_INST_STQCX | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_MADDHD(t, a, b, c) (PPC_INST_MADDHD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | ___PPC_RC(c)) -#define PPC_RAW_MADDHDU(t, a, b, c) (PPC_INST_MADDHDU | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | ___PPC_RC(c)) -#define PPC_RAW_MADDLD(t, a, b, c) (PPC_INST_MADDLD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | ___PPC_RC(c)) -#define PPC_RAW_MSGSND(b) (PPC_INST_MSGSND | ___PPC_RB(b)) -#define PPC_RAW_MSGSYNC (PPC_INST_MSGSYNC) -#define PPC_RAW_MSGCLR(b) (PPC_INST_MSGCLR | ___PPC_RB(b)) -#define PPC_RAW_MSGSNDP(b) (PPC_INST_MSGSNDP | ___PPC_RB(b)) -#define PPC_RAW_MSGCLRP(b) (PPC_INST_MSGCLRP | ___PPC_RB(b)) -#define PPC_RAW_PASTE(a, b) (PPC_INST_PASTE | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_DARN(t, l) (0x7c0005e6 | ___PPC_RT(t) | (((l) & 0x3) << 16)) +#define PPC_RAW_DCBAL(a, b) (0x7c2005ec | __PPC_RA(a) | __PPC_RB(b)) +#define PPC_RAW_DCBZL(a, b) (0x7c2007ec | __PPC_RA(a) | __PPC_RB(b)) +#define PPC_RAW_LQARX(t, a, b, eh) (0x7c000228 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | __PPC_EH(eh)) +#define PPC_RAW_LDARX(t, a, b, eh) (0x7c0000a8 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | __PPC_EH(eh)) +#define PPC_RAW_LWARX(t, a, b, eh) (0x7c000028 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | __PPC_EH(eh)) +#define PPC_RAW_PHWSYNC (0x7c8004ac) +#define PPC_RAW_PLWSYNC (0x7ca004ac) +#define PPC_RAW_STQCX(t, a, b) (0x7c00016d | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_MADDHD(t, a, b, c) (0x10000030 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | ___PPC_RC(c)) +#define PPC_RAW_MADDHDU(t, a, b, c) (0x10000031 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | ___PPC_RC(c)) +#define PPC_RAW_MADDLD(t, a, b, c) (0x10000033 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | ___PPC_RC(c)) +#define PPC_RAW_MSGSND(b) (0x7c00019c | ___PPC_RB(b)) +#define PPC_RAW_MSGSYNC (0x7c0006ec) +#define PPC_RAW_MSGCLR(b) (0x7c0001dc | ___PPC_RB(b)) +#define PPC_RAW_MSGSNDP(b) (0x7c00011c | ___PPC_RB(b)) +#define PPC_RAW_MSGCLRP(b) (0x7c00015c | ___PPC_RB(b)) +#define PPC_RAW_PASTE(a, b) (0x7c20070d | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_POPCNTB(a, s) (PPC_INST_POPCNTB | __PPC_RA(a) | __PPC_RS(s)) -#define PPC_RAW_POPCNTD(a, s) (PPC_INST_POPCNTD | __PPC_RA(a) | __PPC_RS(s)) -#define PPC_RAW_POPCNTW(a, s) (PPC_INST_POPCNTW | __PPC_RA(a) | __PPC_RS(s)) -#define PPC_RAW_RFCI (PPC_INST_RFCI) -#define PPC_RAW_RFDI (PPC_INST_RFDI) -#define PPC_RAW_RFMCI (PPC_INST_RFMCI) -#define PPC_RAW_TLBILX(t, a, b) (PPC_INST_TLBILX | __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b)) -#define PPC_RAW_WAIT(w) (PPC_INST_WAIT | __PPC_WC(w)) -#define PPC_RAW_TLBIE(lp, a) 
(PPC_INST_TLBIE | ___PPC_RB(a) | ___PPC_RS(lp)) +#define PPC_RAW_POPCNTD(a, s) (0x7c0003f4 | __PPC_RA(a) | __PPC_RS(s)) +#define PPC_RAW_POPCNTW(a, s) (0x7c0002f4 | __PPC_RA(a) | __PPC_RS(s)) +#define PPC_RAW_RFCI (0x4c000066) +#define PPC_RAW_RFDI (0x4c00004e) +#define PPC_RAW_RFMCI (0x4c00004c) +#define PPC_RAW_TLBILX(t, a, b) (0x7c000024 | __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b)) +#define PPC_RAW_WAIT(w) (0x7c00007c | __PPC_WC(w)) +#define PPC_RAW_TLBIE(lp, a) (0x7c000264 | ___PPC_RB(a) | ___PPC_RS(lp)) #define PPC_RAW_TLBIE_5(rb, rs, ric, prs, r) \ - (PPC_INST_TLBIE | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r)) + (0x7c000264 | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r)) #define PPC_RAW_TLBIEL(rb, rs, ric, prs, r) \ - (PPC_INST_TLBIEL | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r)) -#define PPC_RAW_TLBSRX_DOT(a, b) (PPC_INST_TLBSRX_DOT | __PPC_RA0(a) | __PPC_RB(b)) -#define PPC_RAW_TLBIVAX(a, b) (PPC_INST_TLBIVAX | __PPC_RA0(a) | __PPC_RB(b)) -#define PPC_RAW_ERATWE(s, a, w) (PPC_INST_ERATWE | __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w)) -#define PPC_RAW_ERATRE(s, a, w) (PPC_INST_ERATRE | __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w)) -#define PPC_RAW_ERATILX(t, a, b) (PPC_INST_ERATILX | __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b)) -#define PPC_RAW_ERATIVAX(s, a, b) (PPC_INST_ERATIVAX | __PPC_RS(s) | __PPC_RA0(a) | __PPC_RB(b)) -#define PPC_RAW_ERATSX(t, a, w) (PPC_INST_ERATSX | __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b)) -#define PPC_RAW_ERATSX_DOT(t, a, w) (PPC_INST_ERATSX_DOT | __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b)) -#define PPC_RAW_SLBFEE_DOT(t, b) (PPC_INST_SLBFEE | __PPC_RT(t) | __PPC_RB(b)) -#define __PPC_RAW_SLBFEE_DOT(t, b) (PPC_INST_SLBFEE | ___PPC_RT(t) | ___PPC_RB(b)) -#define PPC_RAW_ICBT(c, a, b) (PPC_INST_ICBT | __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b)) -#define PPC_RAW_LBZCIX(t, a, b) (PPC_INST_LBZCIX | __PPC_RT(t) | __PPC_RA(a) | __PPC_RB(b)) -#define PPC_RAW_STBCIX(s, a, b) (PPC_INST_STBCIX | __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b)) -#define PPC_RAW_DCBFPS(a, b) (PPC_INST_DCBF | ___PPC_RA(a) | ___PPC_RB(b) | (4 << 21)) -#define PPC_RAW_DCBSTPS(a, b) (PPC_INST_DCBF | ___PPC_RA(a) | ___PPC_RB(b) | (6 << 21)) + (0x7c000224 | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r)) +#define PPC_RAW_TLBSRX_DOT(a, b) (0x7c0006a5 | __PPC_RA0(a) | __PPC_RB(b)) +#define PPC_RAW_TLBIVAX(a, b) (0x7c000624 | __PPC_RA0(a) | __PPC_RB(b)) +#define PPC_RAW_ERATWE(s, a, w) (0x7c0001a6 | __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w)) +#define PPC_RAW_ERATRE(s, a, w) (0x7c000166 | __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w)) +#define PPC_RAW_ERATILX(t, a, b) (0x7c000066 | __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b)) +#define PPC_RAW_ERATIVAX(s, a, b) (0x7c000666 | __PPC_RS(s) | __PPC_RA0(a) | __PPC_RB(b)) +#define PPC_RAW_ERATSX(t, a, w) (0x7c000126 | __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b)) +#define PPC_RAW_ERATSX_DOT(t, a, w) (0x7c000127 | __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b)) +#define PPC_RAW_SLBFEE_DOT(t, b) (0x7c0007a7 | __PPC_RT(t) | __PPC_RB(b)) +#define __PPC_RAW_SLBFEE_DOT(t, b) (0x7c0007a7 | ___PPC_RT(t) | ___PPC_RB(b)) +#define PPC_RAW_ICBT(c, a, b) (0x7c00002c | __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b)) +#define PPC_RAW_LBZCIX(t, a, b) (0x7c0006aa | __PPC_RT(t) | __PPC_RA(a) | __PPC_RB(b)) +#define PPC_RAW_STBCIX(s, a, b) (0x7c0007aa | __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b)) +#define PPC_RAW_DCBFPS(a, b) (0x7c0000ac | ___PPC_RA(a) | ___PPC_RB(b) | (4 << 
21)) +#define PPC_RAW_DCBSTPS(a, b) (0x7c0000ac | ___PPC_RA(a) | ___PPC_RB(b) | (6 << 21)) /* * Define what the VSX XX1 form instructions will look like, then add * the 128 bit load store instructions based on that. */ #define VSX_XX1(s, a, b) (__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b)) #define VSX_XX3(t, a, b) (__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b)) -#define PPC_RAW_STXVD2X(s, a, b) (PPC_INST_STXVD2X | VSX_XX1((s), a, b)) -#define PPC_RAW_LXVD2X(s, a, b) (PPC_INST_LXVD2X | VSX_XX1((s), a, b)) -#define PPC_RAW_MFVRD(a, t) (PPC_INST_MFVSRD | VSX_XX1((t) + 32, a, R0)) -#define PPC_RAW_MTVRD(t, a) (PPC_INST_MTVSRD | VSX_XX1((t) + 32, a, R0)) -#define PPC_RAW_VPMSUMW(t, a, b) (PPC_INST_VPMSUMW | VSX_XX3((t), a, b)) -#define PPC_RAW_VPMSUMD(t, a, b) (PPC_INST_VPMSUMD | VSX_XX3((t), a, b)) -#define PPC_RAW_XXLOR(t, a, b) (PPC_INST_XXLOR | VSX_XX3((t), a, b)) -#define PPC_RAW_XXSWAPD(t, a) (PPC_INST_XXSWAPD | VSX_XX3((t), a, a)) -#define PPC_RAW_XVCPSGNDP(t, a, b) ((PPC_INST_XVCPSGNDP | VSX_XX3((t), (a), (b)))) +#define PPC_RAW_STXVD2X(s, a, b) (0x7c000798 | VSX_XX1((s), a, b)) +#define PPC_RAW_LXVD2X(s, a, b) (0x7c000698 | VSX_XX1((s), a, b)) +#define PPC_RAW_MFVRD(a, t) (0x7c000066 | VSX_XX1((t) + 32, a, R0)) +#define PPC_RAW_MTVRD(t, a) (0x7c000166 | VSX_XX1((t) + 32, a, R0)) +#define PPC_RAW_VPMSUMW(t, a, b) (0x10000488 | VSX_XX3((t), a, b)) +#define PPC_RAW_VPMSUMD(t, a, b) (0x100004c8 | VSX_XX3((t), a, b)) +#define PPC_RAW_XXLOR(t, a, b) (0xf0000490 | VSX_XX3((t), a, b)) +#define PPC_RAW_XXSWAPD(t, a) (0xf0000250 | VSX_XX3((t), a, a)) +#define PPC_RAW_XVCPSGNDP(t, a, b) ((0xf0000780 | VSX_XX3((t), (a), (b)))) #define PPC_RAW_VPERMXOR(vrt, vra, vrb, vrc) \ - ((PPC_INST_VPERMXOR | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | (((vrc) & 0x1f) << 6))) -#define PPC_RAW_NAP (PPC_INST_NAP) -#define PPC_RAW_SLEEP (PPC_INST_SLEEP) -#define PPC_RAW_WINKLE (PPC_INST_WINKLE) -#define PPC_RAW_STOP (PPC_INST_STOP) -#define PPC_RAW_CLRBHRB (PPC_INST_CLRBHRB) -#define PPC_RAW_MFBHRBE(r, n) (PPC_INST_BHRBE | __PPC_RT(r) | (((n) & 0x3ff) << 11)) + ((0x1000002d | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | (((vrc) & 0x1f) << 6))) +#define PPC_RAW_NAP (0x4c000364) +#define PPC_RAW_SLEEP (0x4c0003a4) +#define PPC_RAW_WINKLE (0x4c0003e4) +#define PPC_RAW_STOP (0x4c0002e4) +#define PPC_RAW_CLRBHRB (0x7c00035c) +#define PPC_RAW_MFBHRBE(r, n) (0x7c00025c | __PPC_RT(r) | (((n) & 0x3ff) << 11)) #define PPC_RAW_TRECHKPT (PPC_INST_TRECHKPT) #define PPC_RAW_TRECLAIM(r) (PPC_INST_TRECLAIM | __PPC_RA(r)) -#define PPC_RAW_TABORT(r) (PPC_INST_TABORT | __PPC_RA(r)) +#define PPC_RAW_TABORT(r) (0x7c00071d | __PPC_RA(r)) #define TMRN(x) ((((x) & 0x1f) << 16) | (((x) & 0x3e0) << 6)) -#define PPC_RAW_MTTMR(tmr, r) (PPC_INST_MTTMR | TMRN(tmr) | ___PPC_RS(r)) -#define PPC_RAW_MFTMR(tmr, r) (PPC_INST_MFTMR | TMRN(tmr) | ___PPC_RT(r)) -#define PPC_RAW_ICSWX(s, a, b) (PPC_INST_ICSWX | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_ICSWEPX(s, a, b) (PPC_INST_ICSWEPX | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_SLBIA(IH) (PPC_INST_SLBIA | (((IH) & 0x7) << 21)) +#define PPC_RAW_MTTMR(tmr, r) (0x7c0003dc | TMRN(tmr) | ___PPC_RS(r)) +#define PPC_RAW_MFTMR(tmr, r) (0x7c0002dc | TMRN(tmr) | ___PPC_RT(r)) +#define PPC_RAW_ICSWX(s, a, b) (0x7c00032d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_ICSWEPX(s, a, b) (0x7c00076d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_SLBIA(IH) (0x7c0003e4 | (((IH) & 0x7) << 21)) #define PPC_RAW_VCMPEQUD_RC(vrt, vra, vrb) \ - 
(PPC_INST_VCMPEQUD | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21) + (0x100000c7 | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21) #define PPC_RAW_VCMPEQUB_RC(vrt, vra, vrb) \ - (PPC_INST_VCMPEQUB | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21) + (0x10000006 | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21) #define PPC_RAW_LD(r, base, i) (PPC_INST_LD | ___PPC_RT(r) | ___PPC_RA(base) | IMM_DS(i)) -#define PPC_RAW_LWZ(r, base, i) (PPC_INST_LWZ | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) -#define PPC_RAW_LWZX(t, a, b) (PPC_INST_LWZX | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_LWZ(r, base, i) (0x80000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) +#define PPC_RAW_LWZX(t, a, b) (0x7c00002e | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_STD(r, base, i) (PPC_INST_STD | ___PPC_RS(r) | ___PPC_RA(base) | IMM_DS(i)) -#define PPC_RAW_STDCX(s, a, b) (PPC_INST_STDCX | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_LFSX(t, a, b) (PPC_INST_LFSX | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_STFSX(s, a, b) (PPC_INST_STFSX | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_LFDX(t, a, b) (PPC_INST_LFDX | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_STFDX(s, a, b) (PPC_INST_STFDX | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_LVX(t, a, b) (PPC_INST_LVX | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_STVX(s, a, b) (PPC_INST_STVX | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_STDCX(s, a, b) (0x7c0001ad | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_LFSX(t, a, b) (0x7c00042e | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_STFSX(s, a, b) (0x7c00052e | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_LFDX(t, a, b) (0x7c0004ae | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_STFDX(s, a, b) (0x7c0005ae | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_LVX(t, a, b) (0x7c0000ce | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_STVX(s, a, b) (0x7c0001ce | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_ADD(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_ADD_DOT(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1) -#define PPC_RAW_ADDC(t, a, b) (PPC_INST_ADDC | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_ADDC_DOT(t, a, b) (PPC_INST_ADDC | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1) +#define PPC_RAW_ADDC(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_ADDC_DOT(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1) #define PPC_RAW_NOP() (PPC_INST_NOP) #define PPC_RAW_BLR() (PPC_INST_BLR) -#define PPC_RAW_BLRL() (PPC_INST_BLRL) -#define PPC_RAW_MTLR(r) (PPC_INST_MTLR | ___PPC_RT(r)) +#define PPC_RAW_BLRL() (0x4e800021) +#define PPC_RAW_MTLR(r) (0x7c0803a6 | ___PPC_RT(r)) #define PPC_RAW_BCTR() (PPC_INST_BCTR) #define PPC_RAW_MTCTR(r) (PPC_INST_MTCTR | ___PPC_RT(r)) #define PPC_RAW_ADDI(d, a, i) (PPC_INST_ADDI | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i)) #define PPC_RAW_LI(r, i) PPC_RAW_ADDI(r, 0, i) #define PPC_RAW_ADDIS(d, a, i) (PPC_INST_ADDIS | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i)) #define PPC_RAW_LIS(r, i) PPC_RAW_ADDIS(r, 0, i) -#define PPC_RAW_STDX(r, base, b) (PPC_INST_STDX | ___PPC_RS(r) | ___PPC_RA(base) | ___PPC_RB(b)) -#define PPC_RAW_STDU(r, base, i) (PPC_INST_STDU | ___PPC_RS(r) | 
___PPC_RA(base) | ((i) & 0xfffc)) -#define PPC_RAW_STW(r, base, i) (PPC_INST_STW | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i)) -#define PPC_RAW_STWU(r, base, i) (PPC_INST_STWU | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i)) -#define PPC_RAW_STH(r, base, i) (PPC_INST_STH | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i)) -#define PPC_RAW_STB(r, base, i) (PPC_INST_STB | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i)) -#define PPC_RAW_LBZ(r, base, i) (PPC_INST_LBZ | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) -#define PPC_RAW_LDX(r, base, b) (PPC_INST_LDX | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) -#define PPC_RAW_LHZ(r, base, i) (PPC_INST_LHZ | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) -#define PPC_RAW_LHBRX(r, base, b) (PPC_INST_LHBRX | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) -#define PPC_RAW_LDBRX(r, base, b) (PPC_INST_LDBRX | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) -#define PPC_RAW_STWCX(s, a, b) (PPC_INST_STWCX | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_CMPWI(a, i) (PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i)) -#define PPC_RAW_CMPDI(a, i) (PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i)) -#define PPC_RAW_CMPW(a, b) (PPC_INST_CMPW | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_CMPD(a, b) (PPC_INST_CMPD | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_CMPLWI(a, i) (PPC_INST_CMPLWI | ___PPC_RA(a) | IMM_L(i)) -#define PPC_RAW_CMPLDI(a, i) (PPC_INST_CMPLDI | ___PPC_RA(a) | IMM_L(i)) -#define PPC_RAW_CMPLW(a, b) (PPC_INST_CMPLW | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_CMPLD(a, b) (PPC_INST_CMPLD | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_SUB(d, a, b) (PPC_INST_SUB | ___PPC_RT(d) | ___PPC_RB(a) | ___PPC_RA(b)) -#define PPC_RAW_MULD(d, a, b) (PPC_INST_MULLD | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_MULW(d, a, b) (PPC_INST_MULLW | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_MULHWU(d, a, b) (PPC_INST_MULHWU | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_MULI(d, a, i) (PPC_INST_MULLI | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i)) -#define PPC_RAW_DIVWU(d, a, b) (PPC_INST_DIVWU | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_DIVDU(d, a, b) (PPC_INST_DIVDU | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) -#define PPC_RAW_AND(d, a, b) (PPC_INST_AND | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) -#define PPC_RAW_ANDI(d, a, i) (PPC_INST_ANDI | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) -#define PPC_RAW_AND_DOT(d, a, b) (PPC_INST_ANDDOT | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) -#define PPC_RAW_OR(d, a, b) (PPC_INST_OR | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) +#define PPC_RAW_STDX(r, base, b) (0x7c00012a | ___PPC_RS(r) | ___PPC_RA(base) | ___PPC_RB(b)) +#define PPC_RAW_STDU(r, base, i) (0xf8000001 | ___PPC_RS(r) | ___PPC_RA(base) | ((i) & 0xfffc)) +#define PPC_RAW_STW(r, base, i) (0x90000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i)) +#define PPC_RAW_STWU(r, base, i) (0x94000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i)) +#define PPC_RAW_STH(r, base, i) (0xb0000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i)) +#define PPC_RAW_STB(r, base, i) (0x98000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i)) +#define PPC_RAW_LBZ(r, base, i) (0x88000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) +#define PPC_RAW_LDX(r, base, b) (0x7c00002a | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) +#define PPC_RAW_LHZ(r, base, i) (0xa0000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) +#define PPC_RAW_LHBRX(r, base, b) (0x7c00062c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) +#define PPC_RAW_LDBRX(r, base, b) (0x7c000428 | 
___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) +#define PPC_RAW_STWCX(s, a, b) (0x7c00012d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_CMPWI(a, i) (0x2c000000 | ___PPC_RA(a) | IMM_L(i)) +#define PPC_RAW_CMPDI(a, i) (0x2c200000 | ___PPC_RA(a) | IMM_L(i)) +#define PPC_RAW_CMPW(a, b) (0x7c000000 | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_CMPD(a, b) (0x7c200000 | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_CMPLWI(a, i) (0x28000000 | ___PPC_RA(a) | IMM_L(i)) +#define PPC_RAW_CMPLDI(a, i) (0x28200000 | ___PPC_RA(a) | IMM_L(i)) +#define PPC_RAW_CMPLW(a, b) (0x7c000040 | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_CMPLD(a, b) (0x7c200040 | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_SUB(d, a, b) (0x7c000050 | ___PPC_RT(d) | ___PPC_RB(a) | ___PPC_RA(b)) +#define PPC_RAW_MULD(d, a, b) (0x7c0001d2 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_MULW(d, a, b) (0x7c0001d6 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_MULHWU(d, a, b) (0x7c000016 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_MULI(d, a, i) (0x1c000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i)) +#define PPC_RAW_DIVWU(d, a, b) (0x7c000396 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_DIVDU(d, a, b) (0x7c000392 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_AND(d, a, b) (0x7c000038 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) +#define PPC_RAW_ANDI(d, a, i) (0x70000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) +#define PPC_RAW_AND_DOT(d, a, b) (0x7c000039 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) +#define PPC_RAW_OR(d, a, b) (0x7c000378 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) #define PPC_RAW_MR(d, a) PPC_RAW_OR(d, a, a) #define PPC_RAW_ORI(d, a, i) (PPC_INST_ORI | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) #define PPC_RAW_ORIS(d, a, i) (PPC_INST_ORIS | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) -#define PPC_RAW_XOR(d, a, b) (PPC_INST_XOR | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) -#define PPC_RAW_XORI(d, a, i) (PPC_INST_XORI | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) -#define PPC_RAW_XORIS(d, a, i) (PPC_INST_XORIS | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) -#define PPC_RAW_EXTSW(d, a) (PPC_INST_EXTSW | ___PPC_RA(d) | ___PPC_RS(a)) -#define PPC_RAW_SLW(d, a, s) (PPC_INST_SLW | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) -#define PPC_RAW_SLD(d, a, s) (PPC_INST_SLD | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) -#define PPC_RAW_SRW(d, a, s) (PPC_INST_SRW | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) -#define PPC_RAW_SRAW(d, a, s) (PPC_INST_SRAW | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) -#define PPC_RAW_SRAWI(d, a, i) (PPC_INST_SRAWI | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i)) -#define PPC_RAW_SRD(d, a, s) (PPC_INST_SRD | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) -#define PPC_RAW_SRAD(d, a, s) (PPC_INST_SRAD | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) -#define PPC_RAW_SRADI(d, a, i) (PPC_INST_SRADI | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i)) -#define PPC_RAW_RLWINM(d, a, i, mb, me) \ - (PPC_INST_RLWINM | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me)) +#define PPC_RAW_XOR(d, a, b) (0x7c000278 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) +#define PPC_RAW_XORI(d, a, i) (0x68000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) +#define PPC_RAW_XORIS(d, a, i) (0x6c000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) +#define PPC_RAW_EXTSW(d, a) (0x7c0007b4 | ___PPC_RA(d) | ___PPC_RS(a)) +#define PPC_RAW_SLW(d, a, s) (0x7c000030 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) +#define PPC_RAW_SLD(d, a, s) 
(0x7c000036 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) +#define PPC_RAW_SRW(d, a, s) (0x7c000430 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) +#define PPC_RAW_SRAW(d, a, s) (0x7c000630 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) +#define PPC_RAW_SRAWI(d, a, i) (0x7c000670 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i)) +#define PPC_RAW_SRD(d, a, s) (0x7c000436 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) +#define PPC_RAW_SRAD(d, a, s) (0x7c000634 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) +#define PPC_RAW_SRADI(d, a, i) (0x7c000674 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i)) +#define PPC_RAW_RLWINM(d, a, i, mb, me) (0x54000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me)) #define PPC_RAW_RLWINM_DOT(d, a, i, mb, me) \ - (PPC_INST_RLWINM_DOT | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me)) -#define PPC_RAW_RLWIMI(d, a, i, mb, me) \ - (PPC_INST_RLWIMI | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me)) -#define PPC_RAW_RLDICL(d, a, i, mb) (PPC_INST_RLDICL | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_MB64(mb)) + (0x54000001 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me)) +#define PPC_RAW_RLWIMI(d, a, i, mb, me) (0x50000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me)) +#define PPC_RAW_RLDICL(d, a, i, mb) (0x78000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_MB64(mb)) #define PPC_RAW_RLDICR(d, a, i, me) (PPC_INST_RLDICR | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_ME64(me)) /* slwi = rlwinm Rx, Ry, n, 0, 31-n */ @@ -624,7 +488,7 @@ /* sldi = rldicl Rx, Ry, 64-n, n */ #define PPC_RAW_SRDI(d, a, i) PPC_RAW_RLDICL(d, a, 64-(i), i) -#define PPC_RAW_NEG(d, a) (PPC_INST_NEG | ___PPC_RT(d) | ___PPC_RA(a)) +#define PPC_RAW_NEG(d, a) (0x7c0000d0 | ___PPC_RT(d) | ___PPC_RA(a)) /* Deal with instructions that older assemblers aren't aware of */ #define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH) -- cgit v1.2.3 From ade7667a981be49af9310f7c682c226283ec833d Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 13 Jul 2020 18:36:01 +1000 Subject: powerpc: Add cputime_to_nsecs() Generic code has a wrapper to implement cputime_to_nsecs() on top of cputime_to_usecs() but we can easily return the full nanosecond resolution directly. Signed-off-by: Anton Blanchard Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200713083601.1103978-1-anton@ozlabs.org --- arch/powerpc/include/asm/cputime.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index 0fccd5ea1e9a..9335b93924b4 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h @@ -36,6 +36,8 @@ static inline unsigned long cputime_to_usecs(const cputime_t ct) return mulhdu((__force u64) ct, __cputime_usec_factor); } +#define cputime_to_nsecs(cputime) tb_to_ns((__force u64)cputime) + /* * PPC64 uses PACA which is task independent for storing accounting data while * PPC32 uses struct thread_info, therefore at task switch the accounting data -- cgit v1.2.3 From ba608c4fa12cfd0cab0e153249c29441f4dd3312 Mon Sep 17 00:00:00 2001 From: Sourabh Jain Date: Mon, 13 Jul 2020 10:54:35 +0530 Subject: powerpc/fadump: fix race between pstore write and fadump crash trigger When we enter into fadump crash path via system reset we fail to update the pstore. On the system reset path we first update the pstore then we go for fadump crash. 
But the problem here is that when all the CPUs try to get the pstore lock to initiate the pstore write, only one CPU will acquire the lock and proceed with the pstore write. Since it is in NMI context, the CPUs that fail to get the lock do not wait for their turn to write to the pstore and simply proceed with the next operation, which is the fadump crash. One of the CPUs that proceeded with the fadump crash path triggers the crash and does not wait for the CPU that got the pstore lock to complete the pstore update. The sequence of events that leads to an unsuccessful pstore update when we hit the fadump crash path via system reset is: CPU threads 1..n reach the system reset path and race to acquire the pstore lock; one CPU gets the lock and begins writing to the pstore, while the CPUs that did not get the lock move ahead on the fadump crash path; one of them then triggers the crash before the pstore update completes. To avoid this race condition, a barrier is added on the crash_fadump() path; it prevents a CPU from triggering the crash until all the online CPUs complete their task. The barrier makes sure all the secondary CPUs hit the crash_fadump() function before we initiate the crash. A timeout is kept to ensure the primary CPU (the one that initiates the crash) does not wait for the secondary CPUs indefinitely. Signed-off-by: Sourabh Jain Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200713052435.183750-1-sourabhjain@linux.ibm.com --- arch/powerpc/kernel/fadump.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 78ab9a6ee6ac..1858896d6809 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -32,11 +32,20 @@ #include #include +/* + * The CPU who acquired the lock to trigger the fadump crash should + * wait for other CPUs to enter. + * + * The timeout is in milliseconds. + */ +#define CRASH_TIMEOUT 500 + static struct fw_dump fw_dump; static void __init fadump_reserve_crash_area(u64 base); struct kobject *fadump_kobj; +static atomic_t cpus_in_fadump; #ifndef CONFIG_PRESERVE_FA_DUMP static DEFINE_MUTEX(fadump_mutex); @@ -668,8 +677,11 @@ early_param("fadump_reserve_mem", early_fadump_reserve_mem); void crash_fadump(struct pt_regs *regs, const char *str) { + unsigned int msecs; struct fadump_crash_info_header *fdh = NULL; int old_cpu, this_cpu; + /* Do not include first CPU */ + unsigned int ncpus = num_online_cpus() - 1; if (!should_fadump_crash()) return; @@ -685,6 +697,8 @@ void crash_fadump(struct pt_regs *regs, const char *str) old_cpu = cmpxchg(&crashing_cpu, -1, this_cpu); if (old_cpu != -1) { + atomic_inc(&cpus_in_fadump); + /* * We can't loop here indefinitely. Wait as long as fadump * is in force. If we race with fadump un-registration this @@ -708,6 +722,16 @@ void crash_fadump(struct pt_regs *regs, const char *str) fdh->online_mask = *cpu_online_mask; + /* + * If we came in via system reset, wait a while for the secondary + * CPUs to enter.
+ */ + if (TRAP(&(fdh->regs)) == 0x100) { + msecs = CRASH_TIMEOUT; + while ((atomic_read(&cpus_in_fadump) < ncpus) && (--msecs > 0)) + mdelay(1); + } + fw_dump.ops->fadump_trigger(fdh, str); } -- cgit v1.2.3 From 29d9407e1037868b59d12948d42ad3ef58fc3a5a Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 15 Jul 2020 10:50:40 +0800 Subject: powerpc/xive: Remove unused inline function xive_kexec_teardown_cpu() commit e27e0a94651e ("powerpc/xive: Remove xive_kexec_teardown_cpu()") left behind this, remove it. Signed-off-by: YueHaibing Reviewed-by: Greg Kurz Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200715025040.33952-1-yuehaibing@huawei.com --- arch/powerpc/include/asm/xive.h | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h index d08ea11b271c..309b4d65b74f 100644 --- a/arch/powerpc/include/asm/xive.h +++ b/arch/powerpc/include/asm/xive.h @@ -155,7 +155,6 @@ static inline void xive_smp_probe(void) { } static inline int xive_smp_prepare_cpu(unsigned int cpu) { return -EINVAL; } static inline void xive_smp_setup_cpu(void) { } static inline void xive_smp_disable_cpu(void) { } -static inline void xive_kexec_teardown_cpu(int secondary) { } static inline void xive_shutdown(void) { } static inline void xive_flush_interrupt(void) { } -- cgit v1.2.3 From 89c140bbaeee7a55ed0360a88f294ead2b95201b Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Wed, 15 Jul 2020 10:08:20 +1000 Subject: pseries: Fix 64 bit logical memory block panic Booting with a 4GB LMB size causes us to panic: qemu-system-ppc64: OS terminated: OS panic: Memory block size not suitable: 0x0 Fix pseries_memory_block_size() to handle 64 bit LMBs. Cc: stable@vger.kernel.org Signed-off-by: Anton Blanchard Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200715000820.1255764-1-anton@ozlabs.org --- arch/powerpc/platforms/pseries/hotplug-memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 73a5dcd977e1..5d545b78111f 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -25,7 +25,7 @@ unsigned long pseries_memory_block_size(void) { struct device_node *np; - unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE; + u64 memblock_size = MIN_MEMORY_BLOCK_SIZE; struct resource r; np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); -- cgit v1.2.3 From 92fe8483b1660feaa602d8be6ca7efe95ae4789b Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 14 Jul 2020 22:24:24 +0800 Subject: cpuidle/pseries: Make symbol 'pseries_idle_driver' static The sparse tool complains as follows: drivers/cpuidle/cpuidle-pseries.c:25:23: warning: symbol 'pseries_idle_driver' was not declared. Should it be static? 'pseries_idle_driver' is not used outside of this file, so marks it static. 
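The warning is easy to reproduce with any file-scope definition that has no prior
declaration; a minimal, self-contained illustration (not the driver code itself):

/* demo.c -- check with: sparse demo.c */
struct demo_driver {
        const char *name;
};

struct demo_driver exported_driver = { .name = "demo" };
/* sparse: warning: symbol 'exported_driver' was not declared.
 *         Should it be static? */

static struct demo_driver local_driver = { .name = "demo" };   /* no warning */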
Reported-by: Hulk Robot Signed-off-by: Wei Yongjun Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200714142424.66648-1-weiyongjun1@huawei.com --- drivers/cpuidle/cpuidle-pseries.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c index 6513ef2af66a..3e058ad2bb51 100644 --- a/drivers/cpuidle/cpuidle-pseries.c +++ b/drivers/cpuidle/cpuidle-pseries.c @@ -22,7 +22,7 @@ #include #include -struct cpuidle_driver pseries_idle_driver = { +static struct cpuidle_driver pseries_idle_driver = { .name = "pseries_idle", .owner = THIS_MODULE, }; -- cgit v1.2.3 From 07497137a5efa9b2628c18083e8b07b33160153d Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Mon, 13 Jul 2020 19:55:06 +0200 Subject: ocxl: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Acked-by: Andrew Donnellan Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200713175506.36676-1-grandmaster@al2klimov.de --- drivers/misc/ocxl/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/misc/ocxl/Kconfig b/drivers/misc/ocxl/Kconfig index 2d2266c1439e..6551007a066c 100644 --- a/drivers/misc/ocxl/Kconfig +++ b/drivers/misc/ocxl/Kconfig @@ -23,7 +23,7 @@ config OCXL The ocxl driver enables userspace programs to access these accelerators through devices in /dev/ocxl/. - For more information, see http://opencapi.org. + For more information, see https://opencapi.org. This is not to be confused with the support for IBM CAPI accelerators (CONFIG_CXL), which are PCI-based instead of a -- cgit v1.2.3 From 9a3e3dccbf4317d02d28f8f99a5d1ccce42f9922 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Mon, 13 Jul 2020 21:26:56 +0200 Subject: powerpc/Kconfig: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200713192656.37443-1-grandmaster@al2klimov.de --- arch/powerpc/platforms/85xx/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig index fa3d29dcb57e..b77cbb0a50e1 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ b/arch/powerpc/platforms/85xx/Kconfig @@ -160,7 +160,7 @@ config XES_MPC85xx computers from Extreme Engineering Solutions (X-ES) based on Freescale MPC85xx processors. Manufacturer: Extreme Engineering Solutions, Inc. 
- URL: + URL: config STX_GP3 bool "Silicon Turnkey Express GP3" -- cgit v1.2.3 From 77ca3951cc37727ae8361d583a30da7a1b84e427 Mon Sep 17 00:00:00 2001 From: Anju T Sudhakar Date: Mon, 13 Jul 2020 20:16:23 +0530 Subject: powerpc/perf: Add kernel support for new MSR[HV PR] bits in trace-imc IMC trace-mode record has MSR[HV PR] bits added in the third DW. These bits can be used to set the cpumode for the instruction pointer captured in each sample. Add support in kernel to use these bits to set the cpumode for each sample. Signed-off-by: Anju T Sudhakar Signed-off-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200713144623.508695-1-maddy@linux.ibm.com --- arch/powerpc/include/asm/imc-pmu.h | 5 +++++ arch/powerpc/perf/imc-pmu.c | 29 ++++++++++++++++++++++++----- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/include/asm/imc-pmu.h b/arch/powerpc/include/asm/imc-pmu.h index 4da4fcba0684..4f897993b710 100644 --- a/arch/powerpc/include/asm/imc-pmu.h +++ b/arch/powerpc/include/asm/imc-pmu.h @@ -99,6 +99,11 @@ struct trace_imc_data { */ #define IMC_TRACE_RECORD_TB1_MASK 0x3ffffffffffULL +/* + * Bit 0:1 in third DW of IMC trace record + * specifies the MSR[HV PR] values. + */ +#define IMC_TRACE_RECORD_VAL_HVPR(x) ((x) >> 62) /* * Device tree parser code detects IMC pmu support and diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index 0edcfd0b491d..a45d694a5d5d 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -1288,11 +1288,30 @@ static int trace_imc_prepare_sample(struct trace_imc_data *mem, header->size = sizeof(*header) + event->header_size; header->misc = 0; - if (is_kernel_addr(data->ip)) - header->misc |= PERF_RECORD_MISC_KERNEL; - else - header->misc |= PERF_RECORD_MISC_USER; - + if (cpu_has_feature(CPU_FTR_ARCH_31)) { + switch (IMC_TRACE_RECORD_VAL_HVPR(mem->val)) { + case 0:/* when MSR HV and PR not set in the trace-record */ + header->misc |= PERF_RECORD_MISC_GUEST_KERNEL; + break; + case 1: /* MSR HV is 0 and PR is 1 */ + header->misc |= PERF_RECORD_MISC_GUEST_USER; + break; + case 2: /* MSR HV is 1 and PR is 0 */ + header->misc |= PERF_RECORD_MISC_HYPERVISOR; + break; + case 3: /* MSR HV is 1 and PR is 1 */ + header->misc |= PERF_RECORD_MISC_USER; + break; + default: + pr_info("IMC: Unable to set the flag based on MSR bits\n"); + break; + } + } else { + if (is_kernel_addr(data->ip)) + header->misc |= PERF_RECORD_MISC_KERNEL; + else + header->misc |= PERF_RECORD_MISC_USER; + } perf_event_header__init_id(header, data, event); return 0; -- cgit v1.2.3 From a9f675f950a07d5c1dbcbb97aabac56f5ed085e3 Mon Sep 17 00:00:00 2001 From: Milton Miller Date: Thu, 16 Jul 2020 09:37:04 +1000 Subject: powerpc/vdso: Fix vdso cpu truncation The code in vdso_cpu_init that exposes the cpu and numa node to userspace via SPRG_VDSO incorrctly masks the cpu to 12 bits. This means that any kernel running on a box with more than 4096 threads (NR_CPUS advertises a limit of of 8192 cpus) would expose userspace to two cpu contexts running at the same time with the same cpu number. Note: I'm not aware of any distro shipping a kernel with support for more than 4096 threads today, nor of any system image that currently exceeds 4096 threads. Found via code browsing. 
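The effect of the narrower mask can be shown in isolation (ordinary userspace C;
the CPU number is just an example above the 4096 boundary):

#include <stdio.h>

int main(void)
{
        unsigned int cpu = 4097, node = 1;

        unsigned int old_val = (cpu & 0xfff)  | ((node & 0xffff) << 16);
        unsigned int new_val = (cpu & 0xffff) | ((node & 0xffff) << 16);

        /* old encodes cpu 1 -- colliding with the real CPU 1 -- new keeps 4097 */
        printf("old cpu=%u, new cpu=%u\n", old_val & 0xffff, new_val & 0xffff);
        return 0;
}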
Fixes: 18ad51dd342a7eb09dbcd059d0b451b616d4dafc ("powerpc: Add VDSO version of getcpu") Signed-off-by: Milton Miller Signed-off-by: Anton Blanchard Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200715233704.1352257-1-anton@ozlabs.org --- arch/powerpc/kernel/vdso.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index e0f4ba45b6cc..8dad44262e75 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -677,7 +677,7 @@ int vdso_getcpu_init(void) node = cpu_to_node(cpu); WARN_ON_ONCE(node > 0xffff); - val = (cpu & 0xfff) | ((node & 0xffff) << 16); + val = (cpu & 0xffff) | ((node & 0xffff) << 16); mtspr(SPRN_SPRG_VDSO_WRITE, val); get_paca()->sprg_vdso = val; -- cgit v1.2.3 From 61f879d97ce4510dd29d676a20d67692e3b34806 Mon Sep 17 00:00:00 2001 From: Nayna Jain Date: Wed, 15 Jul 2020 07:52:01 -0400 Subject: powerpc/pseries: Detect secure and trusted boot state of the system. The device-tree properties to check secure and trusted boot state are different for guests (pseries) compared to baremetal (powernv). This patch updates the existing is_ppc_secureboot_enabled() and is_ppc_trustedboot_enabled() functions to add support for pseries. For pseries the secureboot and trustedboot state are exposed via device-tree properties /ibm,secure-boot and /ibm,trusted-boot. The values of ibm,secure-boot under pseries are interpreted as: 0 - Disabled 1 - Enabled in Log-only mode. This patch interprets this value as disabled, since audit mode is currently not supported for Linux. 2 - Enabled and enforced. 3-9 - Enabled and enforcing; requirements are at the discretion of the operating system. The values of ibm,trusted-boot under pseries are interpreted as: 0 - Disabled 1 - Enabled Signed-off-by: Nayna Jain Reviewed-by: Daniel Axtens Reviewed-by: Mimi Zohar [mpe: Drop machdep.h inclusion, tweak change log slightly] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1594813921-12425-1-git-send-email-nayna@linux.ibm.com --- arch/powerpc/kernel/secure_boot.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/secure_boot.c b/arch/powerpc/kernel/secure_boot.c index 4b982324d368..f9af305d9579 100644 --- a/arch/powerpc/kernel/secure_boot.c +++ b/arch/powerpc/kernel/secure_boot.c @@ -23,12 +23,19 @@ bool is_ppc_secureboot_enabled(void) { struct device_node *node; bool enabled = false; + u32 secureboot; node = get_ppc_fw_sb_node(); enabled = of_property_read_bool(node, "os-secureboot-enforcing"); - of_node_put(node); + if (enabled) + goto out; + + if (!of_property_read_u32(of_root, "ibm,secure-boot", &secureboot)) + enabled = (secureboot > 1); + +out: pr_info("Secure boot mode %s\n", enabled ? "enabled" : "disabled"); return enabled; @@ -38,12 +45,19 @@ bool is_ppc_trustedboot_enabled(void) { struct device_node *node; bool enabled = false; + u32 trustedboot; node = get_ppc_fw_sb_node(); enabled = of_property_read_bool(node, "trusted-enabled"); - of_node_put(node); + if (enabled) + goto out; + + if (!of_property_read_u32(of_root, "ibm,trusted-boot", &trustedboot)) + enabled = (trustedboot > 0); + +out: pr_info("Trusted boot mode %s\n", enabled ? 
"enabled" : "disabled"); return enabled; -- cgit v1.2.3 From 9a77c4a0a12597c661be374b8d566516c0341570 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 20 Jul 2020 10:12:58 +0530 Subject: powerpc/prom: Enable Radix GTSE in cpu pa-features When '029ab30b4c0a ("powerpc/mm: Enable radix GTSE only if supported.")' made GTSE an MMU feature, it was enabled by default in powerpc-cpu-features but was missed in pa-features. This causes random memory corruption during boot of PowerNV kernels where CONFIG_PPC_DT_CPU_FTRS isn't enabled. Fixes: 029ab30b4c0a ("powerpc/mm: Enable radix GTSE only if supported.") Reported-by: Qian Cai Signed-off-by: Nicholas Piggin Signed-off-by: Bharata B Rao [mpe: Unwrap long line] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200720044258.863574-1-bharata@linux.ibm.com --- arch/powerpc/kernel/prom.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 9cc49f265c86..033d43819ed8 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -163,7 +163,7 @@ static struct ibm_pa_feature { { .pabyte = 0, .pabit = 6, .cpu_features = CPU_FTR_NOEXECUTE }, { .pabyte = 1, .pabit = 2, .mmu_features = MMU_FTR_CI_LARGE_PAGE }, #ifdef CONFIG_PPC_RADIX_MMU - { .pabyte = 40, .pabit = 0, .mmu_features = MMU_FTR_TYPE_RADIX }, + { .pabyte = 40, .pabit = 0, .mmu_features = MMU_FTR_TYPE_RADIX | MMU_FTR_GTSE }, #endif { .pabyte = 1, .pabit = 1, .invert = 1, .cpu_features = CPU_FTR_NODSISRALIGN }, { .pabyte = 5, .pabit = 0, .cpu_features = CPU_FTR_REAL_LE, -- cgit v1.2.3 From 645d5ce2f7d6cb4dcf6a4e087fb550e238d24283 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 18:49:22 +0530 Subject: powerpc/mm/radix: Fix PTE/PMD fragment count for early page table mappings We can hit the following BUG_ON during memory unplug: kernel BUG at arch/powerpc/mm/book3s64/pgtable.c:342! Oops: Exception in kernel mode, sig: 5 [#1] LE PAGE_SIZE=64K MMU=Radix SMP NR_CPUS=2048 NUMA pSeries NIP [c000000000093308] pmd_fragment_free+0x48/0xc0 LR [c00000000147bfec] remove_pagetable+0x578/0x60c Call Trace: 0xc000008050000000 (unreliable) remove_pagetable+0x384/0x60c radix__remove_section_mapping+0x18/0x2c remove_section_mapping+0x1c/0x3c arch_remove_memory+0x11c/0x180 try_remove_memory+0x120/0x1b0 __remove_memory+0x20/0x40 dlpar_remove_lmb+0xc0/0x114 dlpar_memory+0x8b0/0xb20 handle_dlpar_errorlog+0xc0/0x190 pseries_hp_work_fn+0x2c/0x60 process_one_work+0x30c/0x810 worker_thread+0x98/0x540 kthread+0x1c4/0x1d0 ret_from_kernel_thread+0x5c/0x74 This occurs when unplug is attempted for such memory which has been mapped using memblock pages as part of early kernel page table setup. We wouldn't have initialized the PMD or PTE fragment count for those PMD or PTE pages. This can be fixed by allocating memory in PAGE_SIZE granularity during early page table allocation. This makes sure a specific page is not shared for another memblock allocation and we can free them correctly on removing page-table pages. Since we now do PAGE_SIZE allocations for both PUD table and PMD table (Note that PTE table allocation is already of PAGE_SIZE), we end up allocating more memory for the same amount of system RAM. Here is a comparision of how much more we need for a 64T and 2G system after this patch: 1. 64T system ------------- 64T RAM would need 64G for vmemmap with struct page size being 64B. 
128 PUD tables for 64T memory (1G mappings) 1 PUD table and 64 PMD tables for 64G vmemmap (2M mappings) With default PUD[PMD]_TABLE_SIZE(4K), (128+1+64)*4K=772K With PAGE_SIZE(64K) table allocations, (128+1+64)*64K=12352K 2. 2G system ------------ 2G RAM would need 2M for vmemmap with struct page size being 64B. 1 PUD table for 2G memory (1G mapping) 1 PUD table and 1 PMD table for 2M vmemmap (2M mappings) With default PUD[PMD]_TABLE_SIZE(4K), (1+1+1)*4K=12K With new PAGE_SIZE(64K) table allocations, (1+1+1)*64K=192K Signed-off-by: Bharata B Rao Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709131925.922266-2-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/book3s/64/pgalloc.h | 16 +++++++++++++++- arch/powerpc/mm/book3s64/pgtable.c | 5 ++++- arch/powerpc/mm/book3s64/radix_pgtable.c | 15 +++++++++++---- arch/powerpc/mm/pgtable-frag.c | 3 +++ 4 files changed, 33 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h index 69c5b051734f..e1af0b394ceb 100644 --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h @@ -107,9 +107,23 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) return pud; } +static inline void __pud_free(pud_t *pud) +{ + struct page *page = virt_to_page(pud); + + /* + * Early pud pages allocated via memblock allocator + * can't be directly freed to slab + */ + if (PageReserved(page)) + free_reserved_page(page); + else + kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud); +} + static inline void pud_free(struct mm_struct *mm, pud_t *pud) { - kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud); + return __pud_free(pud); } static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index c58ad1049909..85de5b574dd7 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -339,6 +339,9 @@ void pmd_fragment_free(unsigned long *pmd) { struct page *page = virt_to_page(pmd); + if (PageReserved(page)) + return free_reserved_page(page); + BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0); if (atomic_dec_and_test(&page->pt_frag_refcount)) { pgtable_pmd_page_dtor(page); @@ -356,7 +359,7 @@ static inline void pgtable_free(void *table, int index) pmd_fragment_free(table); break; case PUD_INDEX: - kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table); + __pud_free(table); break; #if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE) /* 16M hugepd directory at pud level */ diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index bb00e0cba119..85806a6bed4d 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -56,6 +56,13 @@ static __ref void *early_alloc_pgtable(unsigned long size, int nid, return ptr; } +/* + * When allocating pud or pmd pointers, we allocate a complete page + * of PAGE_SIZE rather than PUD_TABLE_SIZE or PMD_TABLE_SIZE. This + * is to ensure that the page obtained from the memblock allocator + * can be completely used as page table page and can be freed + * correctly when the page table entries are removed. 
+ */ static int early_map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t flags, unsigned int map_page_size, @@ -72,8 +79,8 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa, pgdp = pgd_offset_k(ea); p4dp = p4d_offset(pgdp, ea); if (p4d_none(*p4dp)) { - pudp = early_alloc_pgtable(PUD_TABLE_SIZE, nid, - region_start, region_end); + pudp = early_alloc_pgtable(PAGE_SIZE, nid, + region_start, region_end); p4d_populate(&init_mm, p4dp, pudp); } pudp = pud_offset(p4dp, ea); @@ -82,8 +89,8 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa, goto set_the_pte; } if (pud_none(*pudp)) { - pmdp = early_alloc_pgtable(PMD_TABLE_SIZE, nid, - region_start, region_end); + pmdp = early_alloc_pgtable(PAGE_SIZE, nid, region_start, + region_end); pud_populate(&init_mm, pudp, pmdp); } pmdp = pmd_offset(pudp, ea); diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c index ee4bd6d38602..97ae4935da79 100644 --- a/arch/powerpc/mm/pgtable-frag.c +++ b/arch/powerpc/mm/pgtable-frag.c @@ -110,6 +110,9 @@ void pte_fragment_free(unsigned long *table, int kernel) { struct page *page = virt_to_page(table); + if (PageReserved(page)) + return free_reserved_page(page); + BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0); if (atomic_dec_and_test(&page->pt_frag_refcount)) { if (!kernel) -- cgit v1.2.3 From 9ce8853b4a735c8115f55ac0e9c2b27a4c8f80b5 Mon Sep 17 00:00:00 2001 From: Bharata B Rao Date: Thu, 9 Jul 2020 18:49:23 +0530 Subject: powerpc/mm/radix: Free PUD table when freeing pagetable remove_pagetable() isn't freeing PUD table. This causes memory leak during memory unplug. Fix this. Fixes: 4b5d62ca17a1 ("powerpc/mm: add radix__remove_section_mapping()") Signed-off-by: Bharata B Rao Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709131925.922266-3-aneesh.kumar@linux.ibm.com --- arch/powerpc/mm/book3s64/radix_pgtable.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 85806a6bed4d..46ad2da3087a 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -707,6 +707,21 @@ static void free_pmd_table(pmd_t *pmd_start, pud_t *pud) pud_clear(pud); } +static void free_pud_table(pud_t *pud_start, p4d_t *p4d) +{ + pud_t *pud; + int i; + + for (i = 0; i < PTRS_PER_PUD; i++) { + pud = pud_start + i; + if (!pud_none(*pud)) + return; + } + + pud_free(&init_mm, pud_start); + p4d_clear(p4d); +} + struct change_mapping_params { pte_t *pte; unsigned long start; @@ -881,6 +896,7 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end) pud_base = (pud_t *)p4d_page_vaddr(*p4d); remove_pud_table(pud_base, addr, next); + free_pud_table(pud_base, p4d); } spin_unlock(&init_mm.page_table_lock); -- cgit v1.2.3 From d6d6ebfc5dbb4008be21baa4ec2ad45606578966 Mon Sep 17 00:00:00 2001 From: Bharata B Rao Date: Thu, 9 Jul 2020 18:49:24 +0530 Subject: powerpc/mm/radix: Remove split_kernel_mapping() We split the page table mapping on memory unplug if the linear range was mapped with huge page mapping (for ex: 1G) The page table splitting code has a few issues: 1. Recursive locking -------------------- Memory unplug path takes cpu_hotplug_lock and calls stop_machine() for splitting the mappings. However stop_machine() takes cpu_hotplug_lock again causing deadlock. 2. 
BUG: sleeping function called from in_atomic() context --------------------------------------------------------- Memory unplug path (remove_pagetable) takes init_mm.page_table_lock spinlock and later calls stop_machine() which does wait_for_completion() 3. Bad unlock unbalance ----------------------- Memory unplug path takes init_mm.page_table_lock spinlock and calls stop_machine(). The stop_machine thread function runs in a different thread context (migration thread) which tries to release and reaquire ptl. Releasing ptl from a different thread than which acquired it causes bad unlock unbalance. These problems can be avoided if we avoid mapping hot-plugged memory with 1G mapping, thereby removing the need for splitting them during unplug. The kernel always make sure the minimum unplug request is SUBSECTION_SIZE for device memory and SECTION_SIZE for regular memory. In preparation for such a change remove page table splitting support. This essentially is a revert of commit 4dd5f8a99e791 ("powerpc/mm/radix: Split linear mapping on hot-unplug") Signed-off-by: Bharata B Rao Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709131925.922266-4-aneesh.kumar@linux.ibm.com --- arch/powerpc/mm/book3s64/radix_pgtable.c | 95 +++++++------------------------- 1 file changed, 19 insertions(+), 76 deletions(-) diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 46ad2da3087a..d5a01b9aadc9 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include @@ -722,32 +721,6 @@ static void free_pud_table(pud_t *pud_start, p4d_t *p4d) p4d_clear(p4d); } -struct change_mapping_params { - pte_t *pte; - unsigned long start; - unsigned long end; - unsigned long aligned_start; - unsigned long aligned_end; -}; - -static int __meminit stop_machine_change_mapping(void *data) -{ - struct change_mapping_params *params = - (struct change_mapping_params *)data; - - if (!data) - return -1; - - spin_unlock(&init_mm.page_table_lock); - pte_clear(&init_mm, params->aligned_start, params->pte); - create_physical_mapping(__pa(params->aligned_start), - __pa(params->start), -1, PAGE_KERNEL); - create_physical_mapping(__pa(params->end), __pa(params->aligned_end), - -1, PAGE_KERNEL); - spin_lock(&init_mm.page_table_lock); - return 0; -} - static void remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end) { @@ -776,52 +749,6 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr, } } -/* - * clear the pte and potentially split the mapping helper - */ -static void __meminit split_kernel_mapping(unsigned long addr, unsigned long end, - unsigned long size, pte_t *pte) -{ - unsigned long mask = ~(size - 1); - unsigned long aligned_start = addr & mask; - unsigned long aligned_end = addr + size; - struct change_mapping_params params; - bool split_region = false; - - if ((end - addr) < size) { - /* - * We're going to clear the PTE, but not flushed - * the mapping, time to remap and flush. The - * effects if visible outside the processor or - * if we are running in code close to the - * mapping we cleared, we are in trouble. 
- */ - if (overlaps_kernel_text(aligned_start, addr) || - overlaps_kernel_text(end, aligned_end)) { - /* - * Hack, just return, don't pte_clear - */ - WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel " - "text, not splitting\n", addr, end); - return; - } - split_region = true; - } - - if (split_region) { - params.pte = pte; - params.start = addr; - params.end = end; - params.aligned_start = addr & ~(size - 1); - params.aligned_end = min_t(unsigned long, aligned_end, - (unsigned long)__va(memblock_end_of_DRAM())); - stop_machine(stop_machine_change_mapping, ¶ms, NULL); - return; - } - - pte_clear(&init_mm, addr, pte); -} - static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end) { @@ -837,7 +764,12 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr, continue; if (pmd_is_leaf(*pmd)) { - split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd); + if (!IS_ALIGNED(addr, PMD_SIZE) || + !IS_ALIGNED(next, PMD_SIZE)) { + WARN_ONCE(1, "%s: unaligned range\n", __func__); + continue; + } + pte_clear(&init_mm, addr, (pte_t *)pmd); continue; } @@ -862,7 +794,12 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr, continue; if (pud_is_leaf(*pud)) { - split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud); + if (!IS_ALIGNED(addr, PUD_SIZE) || + !IS_ALIGNED(next, PUD_SIZE)) { + WARN_ONCE(1, "%s: unaligned range\n", __func__); + continue; + } + pte_clear(&init_mm, addr, (pte_t *)pud); continue; } @@ -890,7 +827,13 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end) continue; if (p4d_is_leaf(*p4d)) { - split_kernel_mapping(addr, end, P4D_SIZE, (pte_t *)p4d); + if (!IS_ALIGNED(addr, P4D_SIZE) || + !IS_ALIGNED(next, P4D_SIZE)) { + WARN_ONCE(1, "%s: unaligned range\n", __func__); + continue; + } + + pte_clear(&init_mm, addr, (pte_t *)pgd); continue; } -- cgit v1.2.3 From af9d00e93a4f062c5f160325d7b8f33336f6744e Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 18:49:25 +0530 Subject: powerpc/mm/radix: Create separate mappings for hot-plugged memory To enable memory unplug without splitting kernel page table mapping, we force the max mapping size to the LMB size. LMB size is the unit in which hypervisor will do memory add/remove operation. Pseries systems supports max LMB size of 256MB. Hence on pseries, we now end up mapping memory with 2M page size instead of 1G. To improve that we want hypervisor to hint the kernel about the hotplug memory range. That was added that as part of commit b6eca183e23e ("powerpc/kernel: Enables memory hot-remove after reboot on pseries guests") But PowerVM doesn't provide that hint yet. Once we get PowerVM updated, we can then force the 2M mapping only to hot-pluggable memory region using memblock_is_hotpluggable(). Till then let's depend on LMB size for finding the mapping page size for linear range. With this change KVM guest will also be doing linear mapping with 2M page size. The actual TLB benefit of mapping guest page table entries with hugepage size can only be materialized if the partition scoped entries are also using the same or higher page size. A guest using 1G hugetlbfs backing guest memory can have a performance impact with the above change. 
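The size selection itself is a simple clamp; roughly, as a standalone C toy that
mirrors the clamp the patch adds to create_physical_mapping() rather than its
exact code:

#include <stdio.h>

#define SZ_1G  (1UL << 30)
#define SZ_2M  (1UL << 21)
#define SZ_64K (1UL << 16)

/* Pick the largest page size that fits the remaining gap, clamped to the
 * maximum mapping size (the LMB size on pseries). */
static unsigned long pick_mapping_size(unsigned long gap, unsigned long max)
{
        if (gap > max)
                gap = max;
        if (gap >= SZ_1G)
                return SZ_1G;
        if (gap >= SZ_2M)
                return SZ_2M;
        return SZ_64K;
}

int main(void)
{
        unsigned long lmb = 256UL << 20;        /* pseries: 256MB LMBs */

        printf("%lu\n", pick_mapping_size(4 * SZ_1G, -1UL));   /* 1G, unclamped */
        printf("%lu\n", pick_mapping_size(4 * SZ_1G, lmb));    /* 2M when clamped */
        return 0;
}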
Signed-off-by: Bharata B Rao Signed-off-by: Aneesh Kumar K.V [mpe: Fold in fix from Aneesh spotted by lkp@intel.com] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709131925.922266-5-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/book3s/64/mmu.h | 5 ++ arch/powerpc/mm/book3s64/radix_pgtable.c | 91 ++++++++++++++++++++++++++++---- arch/powerpc/platforms/powernv/setup.c | 10 +++- 3 files changed, 94 insertions(+), 12 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 5393a535240c..15aae924f41c 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -82,6 +82,11 @@ extern unsigned int mmu_pid_bits; /* Base PID to allocate from */ extern unsigned int mmu_base_pid; +/* + * memory block size used with radix translation. + */ +extern unsigned int __ro_after_init radix_mem_block_size; + #define PRTB_SIZE_SHIFT (mmu_pid_bits + 4) #define PRTB_ENTRIES (1ul << mmu_pid_bits) diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index d5a01b9aadc9..c5bf2ef73c36 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -33,6 +34,7 @@ unsigned int mmu_pid_bits; unsigned int mmu_base_pid; +unsigned int radix_mem_block_size __ro_after_init; static __ref void *early_alloc_pgtable(unsigned long size, int nid, unsigned long region_start, unsigned long region_end) @@ -265,6 +267,7 @@ static unsigned long next_boundary(unsigned long addr, unsigned long end) static int __meminit create_physical_mapping(unsigned long start, unsigned long end, + unsigned long max_mapping_size, int nid, pgprot_t _prot) { unsigned long vaddr, addr, mapping_size = 0; @@ -278,6 +281,8 @@ static int __meminit create_physical_mapping(unsigned long start, int rc; gap = next_boundary(addr, end) - addr; + if (gap > max_mapping_size) + gap = max_mapping_size; previous_size = mapping_size; prev_exec = exec; @@ -328,8 +333,9 @@ static void __init radix_init_pgtable(void) /* We don't support slb for radix */ mmu_slb_size = 0; + /* - * Create the linear mapping, using standard page size for now + * Create the linear mapping */ for_each_memblock(memory, reg) { /* @@ -345,6 +351,7 @@ static void __init radix_init_pgtable(void) WARN_ON(create_physical_mapping(reg->base, reg->base + reg->size, + radix_mem_block_size, -1, PAGE_KERNEL)); } @@ -485,6 +492,57 @@ static int __init radix_dt_scan_page_sizes(unsigned long node, return 1; } +#ifdef CONFIG_MEMORY_HOTPLUG +static int __init probe_memory_block_size(unsigned long node, const char *uname, int + depth, void *data) +{ + unsigned long *mem_block_size = (unsigned long *)data; + const __be64 *prop; + int len; + + if (depth != 1) + return 0; + + if (strcmp(uname, "ibm,dynamic-reconfiguration-memory")) + return 0; + + prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len); + if (!prop || len < sizeof(__be64)) + /* + * Nothing in the device tree + */ + *mem_block_size = MIN_MEMORY_BLOCK_SIZE; + else + *mem_block_size = be64_to_cpup(prop); + return 1; +} + +static unsigned long radix_memory_block_size(void) +{ + unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE; + + /* + * OPAL firmware feature is set by now. Hence we are ok + * to test OPAL feature. 
+ */ + if (firmware_has_feature(FW_FEATURE_OPAL)) + mem_block_size = 1UL * 1024 * 1024 * 1024; + else + of_scan_flat_dt(probe_memory_block_size, &mem_block_size); + + return mem_block_size; +} + +#else /* CONFIG_MEMORY_HOTPLUG */ + +static unsigned long radix_memory_block_size(void) +{ + return 1UL * 1024 * 1024 * 1024; +} + +#endif /* CONFIG_MEMORY_HOTPLUG */ + + void __init radix__early_init_devtree(void) { int rc; @@ -493,17 +551,27 @@ void __init radix__early_init_devtree(void) * Try to find the available page sizes in the device-tree */ rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL); - if (rc != 0) /* Found */ - goto found; + if (!rc) { + /* + * No page size details found in device tree. + * Let's assume we have page 4k and 64k support + */ + mmu_psize_defs[MMU_PAGE_4K].shift = 12; + mmu_psize_defs[MMU_PAGE_4K].ap = 0x0; + + mmu_psize_defs[MMU_PAGE_64K].shift = 16; + mmu_psize_defs[MMU_PAGE_64K].ap = 0x5; + } + /* - * let's assume we have page 4k and 64k support + * Max mapping size used when mapping pages. We don't use + * ppc_md.memory_block_size() here because this get called + * early and we don't have machine probe called yet. Also + * the pseries implementation only check for ibm,lmb-size. + * All hypervisor supporting radix do expose that device + * tree node. */ - mmu_psize_defs[MMU_PAGE_4K].shift = 12; - mmu_psize_defs[MMU_PAGE_4K].ap = 0x0; - - mmu_psize_defs[MMU_PAGE_64K].shift = 16; - mmu_psize_defs[MMU_PAGE_64K].ap = 0x5; -found: + radix_mem_block_size = radix_memory_block_size(); return; } @@ -855,7 +923,8 @@ int __meminit radix__create_section_mapping(unsigned long start, return -1; } - return create_physical_mapping(__pa(start), __pa(end), nid, prot); + return create_physical_mapping(__pa(start), __pa(end), + radix_mem_block_size, nid, prot); } int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end) diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 3bc188da82ba..7fcb88623081 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -399,7 +399,15 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE static unsigned long pnv_memory_block_size(void) { - return 256UL * 1024 * 1024; + /* + * We map the kernel linear region with 1GB large pages on radix. For + * memory hot unplug to work our memory block size must be at least + * this size. + */ + if (radix_enabled()) + return radix_mem_block_size; + else + return 256UL * 1024 * 1024; } #endif -- cgit v1.2.3 From c37a63afc429ce959402168f67e4f094ab639ace Mon Sep 17 00:00:00 2001 From: Santosh Sivaraj Date: Thu, 9 Jul 2020 19:21:41 +0530 Subject: powerpc/mce: Add MCE notification chain Introduce notification chain which lets us know about uncorrected memory errors(UE). This would help prospective users in pmem or nvdimm subsystem to track bad blocks for better handling of persistent memory allocations. 
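A prospective user hooks in along these lines (sketch only, with made-up my_*
names; the papr_scm patch that follows in this series is the first real consumer):

static int my_ue_handler(struct notifier_block *nb, unsigned long val,
                         void *data)
{
        struct machine_check_event *evt = data;

        if (evt->error_type != MCE_ERROR_TYPE_UE)
                return NOTIFY_DONE;

        /* e.g. record evt->u.ue_error.physical_address as a bad block */
        return NOTIFY_OK;
}

static struct notifier_block my_ue_nb = {
        .notifier_call = my_ue_handler,
};

static int __init my_driver_init(void)
{
        return mce_register_notifier(&my_ue_nb);
}

static void __exit my_driver_exit(void)
{
        mce_unregister_notifier(&my_ue_nb);
}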
Signed-off-by: Santosh Sivaraj Signed-off-by: Ganesh Goudar Reviewed-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709135142.721504-1-santosh@fossix.org --- arch/powerpc/include/asm/mce.h | 2 ++ arch/powerpc/kernel/mce.c | 15 +++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h index 376a395daf32..7bdd0cd4f2de 100644 --- a/arch/powerpc/include/asm/mce.h +++ b/arch/powerpc/include/asm/mce.h @@ -220,6 +220,8 @@ extern void machine_check_print_event_info(struct machine_check_event *evt, unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr); extern void mce_common_process_ue(struct pt_regs *regs, struct mce_error_info *mce_err); +int mce_register_notifier(struct notifier_block *nb); +int mce_unregister_notifier(struct notifier_block *nb); #ifdef CONFIG_PPC_BOOK3S_64 void flush_and_reload_slb(void); #endif /* CONFIG_PPC_BOOK3S_64 */ diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index fd90c0eda229..b7b3ed4e6193 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -49,6 +49,20 @@ static struct irq_work mce_ue_event_irq_work = { DECLARE_WORK(mce_ue_event_work, machine_process_ue_event); +static BLOCKING_NOTIFIER_HEAD(mce_notifier_list); + +int mce_register_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&mce_notifier_list, nb); +} +EXPORT_SYMBOL_GPL(mce_register_notifier); + +int mce_unregister_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&mce_notifier_list, nb); +} +EXPORT_SYMBOL_GPL(mce_unregister_notifier); + static void mce_set_error_info(struct machine_check_event *mce, struct mce_error_info *mce_err) { @@ -278,6 +292,7 @@ static void machine_process_ue_event(struct work_struct *work) while (__this_cpu_read(mce_ue_count) > 0) { index = __this_cpu_read(mce_ue_count) - 1; evt = this_cpu_ptr(&mce_ue_event_queue[index]); + blocking_notifier_call_chain(&mce_notifier_list, 0, evt); #ifdef CONFIG_MEMORY_FAILURE /* * This should probably queued elsewhere, but -- cgit v1.2.3 From 85343a8da2d969df1a10ada8f7cb857d52ea70a6 Mon Sep 17 00:00:00 2001 From: Santosh Sivaraj Date: Thu, 9 Jul 2020 19:21:42 +0530 Subject: powerpc/papr/scm: Add bad memory ranges to nvdimm bad ranges Subscribe to the MCE notification and add the physical address which generated a memory error to nvdimm bad range. 
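The reported physical address is page aligned, so the handler below recombines it
with the page offset of the effective address; the arithmetic in isolation
(userspace C, made-up addresses, 64K pages assumed):

#include <stdio.h>

#define PAGE_SHIFT 16                   /* 64K pages on this config */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long phys_page = 0x2000000000UL;       /* page-aligned, from MCE */
        unsigned long effective = 0xc000000012345678UL; /* faulting EA */

        unsigned long phys_addr = phys_page + (effective & ~PAGE_MASK);
        printf("0x%lx\n", phys_addr);   /* 0x2000005678: page + in-page offset */
        return 0;
}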
Signed-off-by: Santosh Sivaraj Reviewed-by: Mahesh Salgaonkar Reviewed-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709135142.721504-2-santosh@fossix.org --- arch/powerpc/platforms/pseries/papr_scm.c | 96 ++++++++++++++++++++++++++++++- 1 file changed, 95 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index 66c19c0fe566..8fd441d32487 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -13,9 +13,11 @@ #include #include #include +#include #include #include +#include #define BIND_ANY_ADDR (~0ul) @@ -80,6 +82,7 @@ struct papr_scm_priv { struct resource res; struct nd_region *region; struct nd_interleave_set nd_set; + struct list_head region_list; /* Protect dimm health data from concurrent read/writes */ struct mutex health_mutex; @@ -91,6 +94,9 @@ struct papr_scm_priv { u64 health_bitmap; }; +LIST_HEAD(papr_nd_regions); +DEFINE_MUTEX(papr_ndr_lock); + static int drc_pmem_bind(struct papr_scm_priv *p) { unsigned long ret[PLPAR_HCALL_BUFSIZE]; @@ -759,6 +765,10 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p) dev_info(dev, "Region registered with target node %d and online node %d", target_nid, online_nid); + mutex_lock(&papr_ndr_lock); + list_add_tail(&p->region_list, &papr_nd_regions); + mutex_unlock(&papr_ndr_lock); + return 0; err: nvdimm_bus_unregister(p->bus); @@ -766,6 +776,68 @@ err: nvdimm_bus_unregister(p->bus); return -ENXIO; } +static void papr_scm_add_badblock(struct nd_region *region, + struct nvdimm_bus *bus, u64 phys_addr) +{ + u64 aligned_addr = ALIGN_DOWN(phys_addr, L1_CACHE_BYTES); + + if (nvdimm_bus_add_badrange(bus, aligned_addr, L1_CACHE_BYTES)) { + pr_err("Bad block registration for 0x%llx failed\n", phys_addr); + return; + } + + pr_debug("Add memory range (0x%llx - 0x%llx) as bad range\n", + aligned_addr, aligned_addr + L1_CACHE_BYTES); + + nvdimm_region_notify(region, NVDIMM_REVALIDATE_POISON); +} + +static int handle_mce_ue(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct machine_check_event *evt = data; + struct papr_scm_priv *p; + u64 phys_addr; + bool found = false; + + if (evt->error_type != MCE_ERROR_TYPE_UE) + return NOTIFY_DONE; + + if (list_empty(&papr_nd_regions)) + return NOTIFY_DONE; + + /* + * The physical address obtained here is PAGE_SIZE aligned, so get the + * exact address from the effective address + */ + phys_addr = evt->u.ue_error.physical_address + + (evt->u.ue_error.effective_address & ~PAGE_MASK); + + if (!evt->u.ue_error.physical_address_provided || + !is_zone_device_page(pfn_to_page(phys_addr >> PAGE_SHIFT))) + return NOTIFY_DONE; + + /* mce notifier is called from a process context, so mutex is safe */ + mutex_lock(&papr_ndr_lock); + list_for_each_entry(p, &papr_nd_regions, region_list) { + if (phys_addr >= p->res.start && phys_addr <= p->res.end) { + found = true; + break; + } + } + + if (found) + papr_scm_add_badblock(p->region, p->bus, phys_addr); + + mutex_unlock(&papr_ndr_lock); + + return found ? 
NOTIFY_OK : NOTIFY_DONE; +} + +static struct notifier_block mce_ue_nb = { + .notifier_call = handle_mce_ue +}; + static int papr_scm_probe(struct platform_device *pdev) { struct device_node *dn = pdev->dev.of_node; @@ -866,6 +938,10 @@ static int papr_scm_remove(struct platform_device *pdev) { struct papr_scm_priv *p = platform_get_drvdata(pdev); + mutex_lock(&papr_ndr_lock); + list_del(&p->region_list); + mutex_unlock(&papr_ndr_lock); + nvdimm_bus_unregister(p->bus); drc_pmem_unbind(p); kfree(p->bus_desc.provider_name); @@ -889,7 +965,25 @@ static struct platform_driver papr_scm_driver = { }, }; -module_platform_driver(papr_scm_driver); +static int __init papr_scm_init(void) +{ + int ret; + + ret = platform_driver_register(&papr_scm_driver); + if (!ret) + mce_register_notifier(&mce_ue_nb); + + return ret; +} +module_init(papr_scm_init); + +static void __exit papr_scm_exit(void) +{ + mce_unregister_notifier(&mce_ue_nb); + platform_driver_unregister(&papr_scm_driver); +} +module_exit(papr_scm_exit); + MODULE_DEVICE_TABLE(of, papr_scm_match); MODULE_LICENSE("GPL"); MODULE_AUTHOR("IBM Corporation"); -- cgit v1.2.3 From d79e7a5f26f1d179cbb915a8bf2469b6d7431c29 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:24 +0530 Subject: powerpc/book3s64/pkeys: Use PVR check instead of cpu feature We are wrongly using CPU_FTRS_POWER8 to check for P8 support. Instead, we should use PVR value. Now considering we are using CPU_FTRS_POWER8, that implies we returned true for P9 with older firmware. Keep the same behavior by checking for P9 PVR value. Fixes: cf43d3b26452 ("powerpc: Enable pkey subsystem") Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-2-aneesh.kumar@linux.ibm.com --- arch/powerpc/mm/book3s64/pkeys.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index d174106bab67..82ace6acb0aa 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -83,13 +83,17 @@ static int pkey_initialize(void) scan_pkey_feature(); /* - * Let's assume 32 pkeys on P8 bare metal, if its not defined by device - * tree. We make this exception since skiboot forgot to expose this - * property on power8. + * Let's assume 32 pkeys on P8/P9 bare metal, if its not defined by device + * tree. We make this exception since some version of skiboot forgot to + * expose this property on power8/9. */ - if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR) && - cpu_has_feature(CPU_FTRS_POWER8)) - pkeys_total = 32; + if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR)) { + unsigned long pvr = mfspr(SPRN_PVR); + + if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E || + PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9) + pkeys_total = 32; + } /* * Adjust the upper limit, based on the number of bits supported by -- cgit v1.2.3 From 33699023f51f96ac9be38747e64967ea05e00bab Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:25 +0530 Subject: powerpc/book3s64/pkeys: Fixup bit numbering This number the pkey bit such that it is easy to follow. PKEY_BIT0 is the lower order bit. This makes further changes easy to follow. No functional change in this patch other than linux page table for hash translation now maps pkeys differently. 
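With this numbering, bit N of the key value always lands in PKEY_BITN, so
converting in either direction is a straight per-bit copy. A standalone toy (the
real masks are the H_PTE_PKEY_BIT* defines below; the bit positions here are
arbitrary):

#include <stdio.h>

#define PKEY_BIT0 (1UL << 57)   /* lowest-order key bit */
#define PKEY_BIT1 (1UL << 58)
#define PKEY_BIT2 (1UL << 59)
#define PKEY_BIT3 (1UL << 60)
#define PKEY_BIT4 (1UL << 61)   /* highest-order key bit */

static unsigned long key_to_pte_bits(unsigned int key)
{
        return ((key & 0x1) ? PKEY_BIT0 : 0UL) |
               ((key & 0x2) ? PKEY_BIT1 : 0UL) |
               ((key & 0x4) ? PKEY_BIT2 : 0UL) |
               ((key & 0x8) ? PKEY_BIT3 : 0UL) |
               ((key & 0x10) ? PKEY_BIT4 : 0UL);
}

static unsigned int pte_bits_to_key(unsigned long pte)
{
        return ((pte & PKEY_BIT0) ? 0x1 : 0) |
               ((pte & PKEY_BIT1) ? 0x2 : 0) |
               ((pte & PKEY_BIT2) ? 0x4 : 0) |
               ((pte & PKEY_BIT3) ? 0x8 : 0) |
               ((pte & PKEY_BIT4) ? 0x10 : 0);
}

int main(void)
{
        unsigned int key = 13;  /* 0b01101 */

        printf("key %u -> pte bits 0x%lx -> key %u\n",
               key, key_to_pte_bits(key), pte_bits_to_key(key));
        return 0;
}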
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-3-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/book3s/64/hash-4k.h | 9 +++++---- arch/powerpc/include/asm/book3s/64/hash-64k.h | 8 ++++---- arch/powerpc/include/asm/book3s/64/mmu-hash.h | 8 ++++---- arch/powerpc/include/asm/pkeys.h | 24 ++++++++++++------------ 4 files changed, 25 insertions(+), 24 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index 3f9ae3585ab9..f889d56bf8cf 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -57,11 +57,12 @@ #define H_PMD_FRAG_NR (PAGE_SIZE >> H_PMD_FRAG_SIZE_SHIFT) /* memory key bits, only 8 keys supported */ -#define H_PTE_PKEY_BIT0 0 -#define H_PTE_PKEY_BIT1 0 +#define H_PTE_PKEY_BIT4 0 +#define H_PTE_PKEY_BIT3 0 #define H_PTE_PKEY_BIT2 _RPAGE_RSV3 -#define H_PTE_PKEY_BIT3 _RPAGE_RSV4 -#define H_PTE_PKEY_BIT4 _RPAGE_RSV5 +#define H_PTE_PKEY_BIT1 _RPAGE_RSV4 +#define H_PTE_PKEY_BIT0 _RPAGE_RSV5 + /* * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range() diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h index 0729c034e56f..0a15fd14cf72 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h @@ -36,11 +36,11 @@ #define H_PAGE_HASHPTE _RPAGE_RPN43 /* PTE has associated HPTE */ /* memory key bits. */ -#define H_PTE_PKEY_BIT0 _RPAGE_RSV1 -#define H_PTE_PKEY_BIT1 _RPAGE_RSV2 +#define H_PTE_PKEY_BIT4 _RPAGE_RSV1 +#define H_PTE_PKEY_BIT3 _RPAGE_RSV2 #define H_PTE_PKEY_BIT2 _RPAGE_RSV3 -#define H_PTE_PKEY_BIT3 _RPAGE_RSV4 -#define H_PTE_PKEY_BIT4 _RPAGE_RSV5 +#define H_PTE_PKEY_BIT1 _RPAGE_RSV4 +#define H_PTE_PKEY_BIT0 _RPAGE_RSV5 /* * We need to differentiate between explicit huge page and THP huge diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h index 3fa1b962dc27..58fcc959f9d5 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h @@ -86,8 +86,8 @@ #define HPTE_R_PP0 ASM_CONST(0x8000000000000000) #define HPTE_R_TS ASM_CONST(0x4000000000000000) #define HPTE_R_KEY_HI ASM_CONST(0x3000000000000000) -#define HPTE_R_KEY_BIT0 ASM_CONST(0x2000000000000000) -#define HPTE_R_KEY_BIT1 ASM_CONST(0x1000000000000000) +#define HPTE_R_KEY_BIT4 ASM_CONST(0x2000000000000000) +#define HPTE_R_KEY_BIT3 ASM_CONST(0x1000000000000000) #define HPTE_R_RPN_SHIFT 12 #define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000) #define HPTE_R_RPN_3_0 ASM_CONST(0x01fffffffffff000) @@ -103,8 +103,8 @@ #define HPTE_R_R ASM_CONST(0x0000000000000100) #define HPTE_R_KEY_LO ASM_CONST(0x0000000000000e00) #define HPTE_R_KEY_BIT2 ASM_CONST(0x0000000000000800) -#define HPTE_R_KEY_BIT3 ASM_CONST(0x0000000000000400) -#define HPTE_R_KEY_BIT4 ASM_CONST(0x0000000000000200) +#define HPTE_R_KEY_BIT1 ASM_CONST(0x0000000000000400) +#define HPTE_R_KEY_BIT0 ASM_CONST(0x0000000000000200) #define HPTE_R_KEY (HPTE_R_KEY_LO | HPTE_R_KEY_HI) #define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000) diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h index 2fe6cae14d10..fec358782c04 100644 --- a/arch/powerpc/include/asm/pkeys.h +++ b/arch/powerpc/include/asm/pkeys.h @@ -35,11 +35,11 @@ static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags) if (static_branch_likely(&pkey_disabled)) return 0x0UL; - return (((vm_flags & 
VM_PKEY_BIT0) ? H_PTE_PKEY_BIT4 : 0x0UL) | - ((vm_flags & VM_PKEY_BIT1) ? H_PTE_PKEY_BIT3 : 0x0UL) | + return (((vm_flags & VM_PKEY_BIT0) ? H_PTE_PKEY_BIT0 : 0x0UL) | + ((vm_flags & VM_PKEY_BIT1) ? H_PTE_PKEY_BIT1 : 0x0UL) | ((vm_flags & VM_PKEY_BIT2) ? H_PTE_PKEY_BIT2 : 0x0UL) | - ((vm_flags & VM_PKEY_BIT3) ? H_PTE_PKEY_BIT1 : 0x0UL) | - ((vm_flags & VM_PKEY_BIT4) ? H_PTE_PKEY_BIT0 : 0x0UL)); + ((vm_flags & VM_PKEY_BIT3) ? H_PTE_PKEY_BIT3 : 0x0UL) | + ((vm_flags & VM_PKEY_BIT4) ? H_PTE_PKEY_BIT4 : 0x0UL)); } static inline int vma_pkey(struct vm_area_struct *vma) @@ -53,20 +53,20 @@ static inline int vma_pkey(struct vm_area_struct *vma) static inline u64 pte_to_hpte_pkey_bits(u64 pteflags) { - return (((pteflags & H_PTE_PKEY_BIT0) ? HPTE_R_KEY_BIT0 : 0x0UL) | - ((pteflags & H_PTE_PKEY_BIT1) ? HPTE_R_KEY_BIT1 : 0x0UL) | - ((pteflags & H_PTE_PKEY_BIT2) ? HPTE_R_KEY_BIT2 : 0x0UL) | + return (((pteflags & H_PTE_PKEY_BIT4) ? HPTE_R_KEY_BIT4 : 0x0UL) | ((pteflags & H_PTE_PKEY_BIT3) ? HPTE_R_KEY_BIT3 : 0x0UL) | - ((pteflags & H_PTE_PKEY_BIT4) ? HPTE_R_KEY_BIT4 : 0x0UL)); + ((pteflags & H_PTE_PKEY_BIT2) ? HPTE_R_KEY_BIT2 : 0x0UL) | + ((pteflags & H_PTE_PKEY_BIT1) ? HPTE_R_KEY_BIT1 : 0x0UL) | + ((pteflags & H_PTE_PKEY_BIT0) ? HPTE_R_KEY_BIT0 : 0x0UL)); } static inline u16 pte_to_pkey_bits(u64 pteflags) { - return (((pteflags & H_PTE_PKEY_BIT0) ? 0x10 : 0x0UL) | - ((pteflags & H_PTE_PKEY_BIT1) ? 0x8 : 0x0UL) | + return (((pteflags & H_PTE_PKEY_BIT4) ? 0x10 : 0x0UL) | + ((pteflags & H_PTE_PKEY_BIT3) ? 0x8 : 0x0UL) | ((pteflags & H_PTE_PKEY_BIT2) ? 0x4 : 0x0UL) | - ((pteflags & H_PTE_PKEY_BIT3) ? 0x2 : 0x0UL) | - ((pteflags & H_PTE_PKEY_BIT4) ? 0x1 : 0x0UL)); + ((pteflags & H_PTE_PKEY_BIT1) ? 0x2 : 0x0UL) | + ((pteflags & H_PTE_PKEY_BIT0) ? 0x1 : 0x0UL)); } #define pkey_alloc_mask(pkey) (0x1 << pkey) -- cgit v1.2.3 From b9658f83e721ddfcee3e08b16a6628420de424c3 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:26 +0530 Subject: powerpc/book3s64/pkeys: pkeys are supported only on hash on book3s. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-4-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/book3s/64/hash-pkey.h | 32 ++++++++++++++++++++++ arch/powerpc/include/asm/book3s/64/pkeys.h | 25 +++++++++++++++++ arch/powerpc/include/asm/pkeys.h | 37 +++++--------------------- 3 files changed, 64 insertions(+), 30 deletions(-) create mode 100644 arch/powerpc/include/asm/book3s/64/hash-pkey.h create mode 100644 arch/powerpc/include/asm/book3s/64/pkeys.h diff --git a/arch/powerpc/include/asm/book3s/64/hash-pkey.h b/arch/powerpc/include/asm/book3s/64/hash-pkey.h new file mode 100644 index 000000000000..795010897e5d --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/hash-pkey.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_BOOK3S_64_HASH_PKEY_H +#define _ASM_POWERPC_BOOK3S_64_HASH_PKEY_H + +static inline u64 hash__vmflag_to_pte_pkey_bits(u64 vm_flags) +{ + return (((vm_flags & VM_PKEY_BIT0) ? H_PTE_PKEY_BIT0 : 0x0UL) | + ((vm_flags & VM_PKEY_BIT1) ? H_PTE_PKEY_BIT1 : 0x0UL) | + ((vm_flags & VM_PKEY_BIT2) ? H_PTE_PKEY_BIT2 : 0x0UL) | + ((vm_flags & VM_PKEY_BIT3) ? H_PTE_PKEY_BIT3 : 0x0UL) | + ((vm_flags & VM_PKEY_BIT4) ? H_PTE_PKEY_BIT4 : 0x0UL)); +} + +static inline u64 pte_to_hpte_pkey_bits(u64 pteflags) +{ + return (((pteflags & H_PTE_PKEY_BIT4) ? HPTE_R_KEY_BIT4 : 0x0UL) | + ((pteflags & H_PTE_PKEY_BIT3) ? HPTE_R_KEY_BIT3 : 0x0UL) | + ((pteflags & H_PTE_PKEY_BIT2) ? 
HPTE_R_KEY_BIT2 : 0x0UL) | + ((pteflags & H_PTE_PKEY_BIT1) ? HPTE_R_KEY_BIT1 : 0x0UL) | + ((pteflags & H_PTE_PKEY_BIT0) ? HPTE_R_KEY_BIT0 : 0x0UL)); +} + +static inline u16 hash__pte_to_pkey_bits(u64 pteflags) +{ + return (((pteflags & H_PTE_PKEY_BIT4) ? 0x10 : 0x0UL) | + ((pteflags & H_PTE_PKEY_BIT3) ? 0x8 : 0x0UL) | + ((pteflags & H_PTE_PKEY_BIT2) ? 0x4 : 0x0UL) | + ((pteflags & H_PTE_PKEY_BIT1) ? 0x2 : 0x0UL) | + ((pteflags & H_PTE_PKEY_BIT0) ? 0x1 : 0x0UL)); +} + +#endif diff --git a/arch/powerpc/include/asm/book3s/64/pkeys.h b/arch/powerpc/include/asm/book3s/64/pkeys.h new file mode 100644 index 000000000000..8174662a9173 --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/pkeys.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _ASM_POWERPC_BOOK3S_64_PKEYS_H +#define _ASM_POWERPC_BOOK3S_64_PKEYS_H + +#include + +static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags) +{ + if (static_branch_likely(&pkey_disabled)) + return 0x0UL; + + if (radix_enabled()) + BUG(); + return hash__vmflag_to_pte_pkey_bits(vm_flags); +} + +static inline u16 pte_to_pkey_bits(u64 pteflags) +{ + if (radix_enabled()) + BUG(); + return hash__pte_to_pkey_bits(pteflags); +} + +#endif /*_ASM_POWERPC_KEYS_H */ diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h index fec358782c04..d06ec0948964 100644 --- a/arch/powerpc/include/asm/pkeys.h +++ b/arch/powerpc/include/asm/pkeys.h @@ -25,23 +25,18 @@ extern u32 reserved_allocation_mask; /* bits set for reserved keys */ PKEY_DISABLE_WRITE | \ PKEY_DISABLE_EXECUTE) +#ifdef CONFIG_PPC_BOOK3S_64 +#include +#else +#error "Not supported" +#endif + + static inline u64 pkey_to_vmflag_bits(u16 pkey) { return (((u64)pkey << VM_PKEY_SHIFT) & ARCH_VM_PKEY_FLAGS); } -static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags) -{ - if (static_branch_likely(&pkey_disabled)) - return 0x0UL; - - return (((vm_flags & VM_PKEY_BIT0) ? H_PTE_PKEY_BIT0 : 0x0UL) | - ((vm_flags & VM_PKEY_BIT1) ? H_PTE_PKEY_BIT1 : 0x0UL) | - ((vm_flags & VM_PKEY_BIT2) ? H_PTE_PKEY_BIT2 : 0x0UL) | - ((vm_flags & VM_PKEY_BIT3) ? H_PTE_PKEY_BIT3 : 0x0UL) | - ((vm_flags & VM_PKEY_BIT4) ? H_PTE_PKEY_BIT4 : 0x0UL)); -} - static inline int vma_pkey(struct vm_area_struct *vma) { if (static_branch_likely(&pkey_disabled)) @@ -51,24 +46,6 @@ static inline int vma_pkey(struct vm_area_struct *vma) #define arch_max_pkey() pkeys_total -static inline u64 pte_to_hpte_pkey_bits(u64 pteflags) -{ - return (((pteflags & H_PTE_PKEY_BIT4) ? HPTE_R_KEY_BIT4 : 0x0UL) | - ((pteflags & H_PTE_PKEY_BIT3) ? HPTE_R_KEY_BIT3 : 0x0UL) | - ((pteflags & H_PTE_PKEY_BIT2) ? HPTE_R_KEY_BIT2 : 0x0UL) | - ((pteflags & H_PTE_PKEY_BIT1) ? HPTE_R_KEY_BIT1 : 0x0UL) | - ((pteflags & H_PTE_PKEY_BIT0) ? HPTE_R_KEY_BIT0 : 0x0UL)); -} - -static inline u16 pte_to_pkey_bits(u64 pteflags) -{ - return (((pteflags & H_PTE_PKEY_BIT4) ? 0x10 : 0x0UL) | - ((pteflags & H_PTE_PKEY_BIT3) ? 0x8 : 0x0UL) | - ((pteflags & H_PTE_PKEY_BIT2) ? 0x4 : 0x0UL) | - ((pteflags & H_PTE_PKEY_BIT1) ? 0x2 : 0x0UL) | - ((pteflags & H_PTE_PKEY_BIT0) ? 0x1 : 0x0UL)); -} - #define pkey_alloc_mask(pkey) (0x1 << pkey) #define mm_pkey_allocation_map(mm) (mm->context.pkey_allocation_map) -- cgit v1.2.3 From ee8b39331f89950b0a011c7965db5694f0153166 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:27 +0530 Subject: powerpc/book3s64/pkeys: Move pkey related bits in the linux page table To keep things simple, all the pkey related bits are kept together in linux page table for 64K config with hash translation. 
With hash-4k kernel requires 4 bits to store slots details. This is done by overloading some of the RPN bits for storing the slot details. Due to this PKEY_BIT0 on the 4K config is used for storing hash slot details. 64K before |....|RSV1| RSV2| RSV3 | RSV4 | RPN44| RPN43 |.... | RSV5| |....| P4 | P3 | P2 | P1 | Busy | HASHPTE |.... | P0 | after |....|RSV1| RSV2| RSV3 | RSV4 | RPN44 | RPN43 |.... | RSV5 | |....| P4 | P3 | P2 | P1 | P0 | HASHPTE |.... | Busy | 4k before |....| RSV1 | RSV2 | RSV3 | RSV4 | RPN44| RPN43.... | RSV5| |....| Busy | HASHPTE | P2 | P1 | F_SEC| F_GIX.... | P0 | after |....| RSV1 | RSV2| RSV3 | RSV4 | Free | RPN43.... | RSV5 | |....| HASHPTE | P2 | P1 | P0 | F_SEC| F_GIX.... | BUSY | Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-5-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/book3s/64/hash-4k.h | 16 ++++++++-------- arch/powerpc/include/asm/book3s/64/hash-64k.h | 12 ++++++------ arch/powerpc/include/asm/book3s/64/pgtable.h | 17 ++++++++--------- 3 files changed, 22 insertions(+), 23 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index f889d56bf8cf..082b98808701 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -34,11 +34,11 @@ #define H_PUD_TABLE_SIZE (sizeof(pud_t) << H_PUD_INDEX_SIZE) #define H_PGD_TABLE_SIZE (sizeof(pgd_t) << H_PGD_INDEX_SIZE) -#define H_PAGE_F_GIX_SHIFT 53 -#define H_PAGE_F_SECOND _RPAGE_RPN44 /* HPTE is in 2ndary HPTEG */ -#define H_PAGE_F_GIX (_RPAGE_RPN43 | _RPAGE_RPN42 | _RPAGE_RPN41) -#define H_PAGE_BUSY _RPAGE_RSV1 /* software: PTE & hash are busy */ -#define H_PAGE_HASHPTE _RPAGE_RSV2 /* software: PTE & hash are busy */ +#define H_PAGE_F_GIX_SHIFT _PAGE_PA_MAX +#define H_PAGE_F_SECOND _RPAGE_PKEY_BIT0 /* HPTE is in 2ndary HPTEG */ +#define H_PAGE_F_GIX (_RPAGE_RPN43 | _RPAGE_RPN42 | _RPAGE_RPN41) +#define H_PAGE_BUSY _RPAGE_RSV1 +#define H_PAGE_HASHPTE _RPAGE_PKEY_BIT4 /* PTE flags to conserve for HPTE identification */ #define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_HASHPTE | \ @@ -59,9 +59,9 @@ /* memory key bits, only 8 keys supported */ #define H_PTE_PKEY_BIT4 0 #define H_PTE_PKEY_BIT3 0 -#define H_PTE_PKEY_BIT2 _RPAGE_RSV3 -#define H_PTE_PKEY_BIT1 _RPAGE_RSV4 -#define H_PTE_PKEY_BIT0 _RPAGE_RSV5 +#define H_PTE_PKEY_BIT2 _RPAGE_PKEY_BIT3 +#define H_PTE_PKEY_BIT1 _RPAGE_PKEY_BIT2 +#define H_PTE_PKEY_BIT0 _RPAGE_PKEY_BIT1 /* diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h index 0a15fd14cf72..f20de1149ebe 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h @@ -32,15 +32,15 @@ */ #define H_PAGE_COMBO _RPAGE_RPN0 /* this is a combo 4k page */ #define H_PAGE_4K_PFN _RPAGE_RPN1 /* PFN is for a single 4k page */ -#define H_PAGE_BUSY _RPAGE_RPN44 /* software: PTE & hash are busy */ +#define H_PAGE_BUSY _RPAGE_RSV1 /* software: PTE & hash are busy */ #define H_PAGE_HASHPTE _RPAGE_RPN43 /* PTE has associated HPTE */ /* memory key bits. 
*/ -#define H_PTE_PKEY_BIT4 _RPAGE_RSV1 -#define H_PTE_PKEY_BIT3 _RPAGE_RSV2 -#define H_PTE_PKEY_BIT2 _RPAGE_RSV3 -#define H_PTE_PKEY_BIT1 _RPAGE_RSV4 -#define H_PTE_PKEY_BIT0 _RPAGE_RSV5 +#define H_PTE_PKEY_BIT4 _RPAGE_PKEY_BIT4 +#define H_PTE_PKEY_BIT3 _RPAGE_PKEY_BIT3 +#define H_PTE_PKEY_BIT2 _RPAGE_PKEY_BIT2 +#define H_PTE_PKEY_BIT1 _RPAGE_PKEY_BIT1 +#define H_PTE_PKEY_BIT0 _RPAGE_PKEY_BIT0 /* * We need to differentiate between explicit huge page and THP huge diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 25c3cb8272c0..495fc0ccb453 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -32,11 +32,13 @@ #define _RPAGE_SW1 0x00800 #define _RPAGE_SW2 0x00400 #define _RPAGE_SW3 0x00200 -#define _RPAGE_RSV1 0x1000000000000000UL -#define _RPAGE_RSV2 0x0800000000000000UL -#define _RPAGE_RSV3 0x0400000000000000UL -#define _RPAGE_RSV4 0x0200000000000000UL -#define _RPAGE_RSV5 0x00040UL +#define _RPAGE_RSV1 0x00040UL + +#define _RPAGE_PKEY_BIT4 0x1000000000000000UL +#define _RPAGE_PKEY_BIT3 0x0800000000000000UL +#define _RPAGE_PKEY_BIT2 0x0400000000000000UL +#define _RPAGE_PKEY_BIT1 0x0200000000000000UL +#define _RPAGE_PKEY_BIT0 0x0100000000000000UL #define _PAGE_PTE 0x4000000000000000UL /* distinguishes PTEs from pointers */ #define _PAGE_PRESENT 0x8000000000000000UL /* pte contains a translation */ @@ -58,13 +60,12 @@ */ #define _RPAGE_RPN0 0x01000 #define _RPAGE_RPN1 0x02000 -#define _RPAGE_RPN44 0x0100000000000000UL #define _RPAGE_RPN43 0x0080000000000000UL #define _RPAGE_RPN42 0x0040000000000000UL #define _RPAGE_RPN41 0x0020000000000000UL /* Max physical address bit as per radix table */ -#define _RPAGE_PA_MAX 57 +#define _RPAGE_PA_MAX 56 /* * Max physical address bit we will use for now. @@ -125,8 +126,6 @@ _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP) -#define H_PTE_PKEY (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \ - H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4) /* * We define 2 sets of base prot bits, one for basic pages (ie, * cacheable kernel and user pages) and one for non cacheable -- cgit v1.2.3 From 1f404058e2911afe08417ef82f17aba6adccfc63 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:28 +0530 Subject: powerpc/book3s64/pkeys: Explain key 1 reservation details This explains the details w.r.t key 1. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-6-aneesh.kumar@linux.ibm.com --- arch/powerpc/mm/book3s64/pkeys.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index 82ace6acb0aa..ee27f18fac32 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -128,7 +128,10 @@ static int pkey_initialize(void) #else os_reserved = 0; #endif - /* Bits are in LE format. */ + /* + * key 1 is recommended not to be used. PowerISA(3.0) page 1015, + * programming note. + */ reserved_allocation_mask = (0x1 << 1) | (0x1 << execute_only_key); /* register mask is in BE format */ -- cgit v1.2.3 From f491fe3fb41eafc7a159874040e032ad41ade210 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:29 +0530 Subject: powerpc/book3s64/pkeys: Simplify the key initialization Add documentation explaining the execute_only_key. The reservation and initialization mask details are also explained in this patch. 
No functional change in this patch. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-7-aneesh.kumar@linux.ibm.com --- arch/powerpc/mm/book3s64/pkeys.c | 199 +++++++++++++++++++++++---------------- 1 file changed, 116 insertions(+), 83 deletions(-) diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index ee27f18fac32..2dea8c29df57 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -15,48 +15,80 @@ DEFINE_STATIC_KEY_TRUE(pkey_disabled); int pkeys_total; /* Total pkeys as per device tree */ u32 initial_allocation_mask; /* Bits set for the initially allocated keys */ -u32 reserved_allocation_mask; /* Bits set for reserved keys */ +/* + * Keys marked in the reservation list cannot be allocated by userspace + */ +u32 reserved_allocation_mask; static bool pkey_execute_disable_supported; -static bool pkeys_devtree_defined; /* property exported by device tree */ -static u64 pkey_amr_mask; /* Bits in AMR not to be touched */ -static u64 pkey_iamr_mask; /* Bits in AMR not to be touched */ -static u64 pkey_uamor_mask; /* Bits in UMOR not to be touched */ +/* + * Even if we allocate keys with sys_pkey_alloc(), we need to make sure + * other thread still find the access denied using the same keys. + */ +static u64 default_amr = ~0x0UL; +static u64 default_iamr = 0x5555555555555555UL; + +/* Allow all keys to be modified by default */ +static u64 default_uamor = ~0x0UL; +/* + * Key used to implement PROT_EXEC mmap. Denies READ/WRITE + * We pick key 2 because 0 is special key and 1 is reserved as per ISA. + */ static int execute_only_key = 2; + #define AMR_BITS_PER_PKEY 2 #define AMR_RD_BIT 0x1UL #define AMR_WR_BIT 0x2UL #define IAMR_EX_BIT 0x1UL -#define PKEY_REG_BITS (sizeof(u64)*8) +#define PKEY_REG_BITS (sizeof(u64) * 8) #define pkeyshift(pkey) (PKEY_REG_BITS - ((pkey+1) * AMR_BITS_PER_PKEY)) -static void scan_pkey_feature(void) +static int scan_pkey_feature(void) { u32 vals[2]; + int pkeys_total = 0; struct device_node *cpu; + /* + * Pkey is not supported with Radix translation. + */ + if (radix_enabled()) + return 0; + cpu = of_find_node_by_type(NULL, "cpu"); if (!cpu) - return; + return 0; if (of_property_read_u32_array(cpu, - "ibm,processor-storage-keys", vals, 2)) - return; + "ibm,processor-storage-keys", vals, 2) == 0) { + /* + * Since any pkey can be used for data or execute, we will + * just treat all keys as equal and track them as one entity. + */ + pkeys_total = vals[0]; + } else { + + /* + * Let's assume 32 pkeys on P8/P9 bare metal, if its not defined by device + * tree. We make this exception since some version of skiboot forgot to + * expose this property on power8/9. + */ + if (!firmware_has_feature(FW_FEATURE_LPAR)) { + unsigned long pvr = mfspr(SPRN_PVR); + + if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E || + PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9) + pkeys_total = 32; + } + } /* - * Since any pkey can be used for data or execute, we will just treat - * all keys as equal and track them as one entity. + * Adjust the upper limit, based on the number of bits supported by + * arch-neutral code. 
*/ - pkeys_total = vals[0]; - pkeys_devtree_defined = true; -} - -static inline bool pkey_mmu_enabled(void) -{ - if (firmware_has_feature(FW_FEATURE_LPAR)) - return pkeys_total; - else - return cpu_has_feature(CPU_FTR_PKEY); + pkeys_total = min_t(int, pkeys_total, + ((ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) + 1)); + return pkeys_total; } static int pkey_initialize(void) @@ -80,35 +112,13 @@ static int pkey_initialize(void) != (sizeof(u64) * BITS_PER_BYTE)); /* scan the device tree for pkey feature */ - scan_pkey_feature(); - - /* - * Let's assume 32 pkeys on P8/P9 bare metal, if its not defined by device - * tree. We make this exception since some version of skiboot forgot to - * expose this property on power8/9. - */ - if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR)) { - unsigned long pvr = mfspr(SPRN_PVR); - - if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E || - PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9) - pkeys_total = 32; - } - - /* - * Adjust the upper limit, based on the number of bits supported by - * arch-neutral code. - */ - pkeys_total = min_t(int, pkeys_total, - ((ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT)+1)); - - if (!pkey_mmu_enabled() || radix_enabled() || !pkeys_total) - static_branch_enable(&pkey_disabled); - else + pkeys_total = scan_pkey_feature(); + if (pkeys_total) static_branch_disable(&pkey_disabled); - - if (static_branch_likely(&pkey_disabled)) + else { + static_branch_enable(&pkey_disabled); return 0; + } /* * The device tree cannot be relied to indicate support for @@ -122,48 +132,71 @@ static int pkey_initialize(void) #ifdef CONFIG_PPC_4K_PAGES /* * The OS can manage only 8 pkeys due to its inability to represent them - * in the Linux 4K PTE. + * in the Linux 4K PTE. Mark all other keys reserved. */ os_reserved = pkeys_total - 8; #else os_reserved = 0; #endif - /* - * key 1 is recommended not to be used. PowerISA(3.0) page 1015, - * programming note. - */ - reserved_allocation_mask = (0x1 << 1) | (0x1 << execute_only_key); - - /* register mask is in BE format */ - pkey_amr_mask = ~0x0ul; - pkey_amr_mask &= ~(0x3ul << pkeyshift(0)); - - pkey_iamr_mask = ~0x0ul; - pkey_iamr_mask &= ~(0x3ul << pkeyshift(0)); - pkey_iamr_mask &= ~(0x3ul << pkeyshift(execute_only_key)); - - pkey_uamor_mask = ~0x0ul; - pkey_uamor_mask &= ~(0x3ul << pkeyshift(0)); - pkey_uamor_mask &= ~(0x3ul << pkeyshift(execute_only_key)); - - /* mark the rest of the keys as reserved and hence unavailable */ - for (i = (pkeys_total - os_reserved); i < pkeys_total; i++) { - reserved_allocation_mask |= (0x1 << i); - pkey_uamor_mask &= ~(0x3ul << pkeyshift(i)); - } - initial_allocation_mask = reserved_allocation_mask | (0x1 << 0); if (unlikely((pkeys_total - os_reserved) <= execute_only_key)) { /* * Insufficient number of keys to support * execute only key. Mark it unavailable. - * Any AMR, UAMOR, IAMR bit set for - * this key is irrelevant since this key - * can never be allocated. */ execute_only_key = -1; + } else { + /* + * Mark the execute_only_pkey as not available for + * user allocation via pkey_alloc. + */ + reserved_allocation_mask |= (0x1 << execute_only_key); + + /* + * Deny READ/WRITE for execute_only_key. + * Allow execute in IAMR. + */ + default_amr |= (0x3ul << pkeyshift(execute_only_key)); + default_iamr &= ~(0x1ul << pkeyshift(execute_only_key)); + + /* + * Clear the uamor bits for this key. + */ + default_uamor &= ~(0x3ul << pkeyshift(execute_only_key)); } + /* + * Allow access for only key 0. And prevent any other modification. 
+ */ + default_amr &= ~(0x3ul << pkeyshift(0)); + default_iamr &= ~(0x1ul << pkeyshift(0)); + default_uamor &= ~(0x3ul << pkeyshift(0)); + /* + * key 0 is special in that we want to consider it an allocated + * key which is preallocated. We don't allow changing AMR bits + * w.r.t key 0. But one can pkey_free(key0) + */ + initial_allocation_mask |= (0x1 << 0); + + /* + * key 1 is recommended not to be used. PowerISA(3.0) page 1015, + * programming note. + */ + reserved_allocation_mask |= (0x1 << 1); + + /* + * Prevent the usage of OS reserved the keys. Update UAMOR + * for those keys. + */ + for (i = (pkeys_total - os_reserved); i < pkeys_total; i++) { + reserved_allocation_mask |= (0x1 << i); + default_uamor &= ~(0x3ul << pkeyshift(i)); + } + /* + * Prevent the allocation of reserved keys too. + */ + initial_allocation_mask |= reserved_allocation_mask; + return 0; } @@ -305,13 +338,13 @@ void thread_pkey_regs_init(struct thread_struct *thread) if (static_branch_likely(&pkey_disabled)) return; - thread->amr = pkey_amr_mask; - thread->iamr = pkey_iamr_mask; - thread->uamor = pkey_uamor_mask; + thread->amr = default_amr; + thread->iamr = default_iamr; + thread->uamor = default_uamor; - write_uamor(pkey_uamor_mask); - write_amr(pkey_amr_mask); - write_iamr(pkey_iamr_mask); + write_amr(default_amr); + write_iamr(default_iamr); + write_uamor(default_uamor); } int __execute_only_pkey(struct mm_struct *mm) -- cgit v1.2.3 From 718d9b380174eb8fe16d67769395737b79654a02 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:30 +0530 Subject: powerpc/book3s64/pkeys: Prevent key 1 modification from userspace. Key 1 is marked reserved by ISA. Setup uamor to prevent userspace modification of the same. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-8-aneesh.kumar@linux.ibm.com --- arch/powerpc/mm/book3s64/pkeys.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index 2dea8c29df57..e2bf93901bd0 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -183,6 +183,7 @@ static int pkey_initialize(void) * programming note. */ reserved_allocation_mask |= (0x1 << 1); + default_uamor &= ~(0x3ul << pkeyshift(1)); /* * Prevent the usage of OS reserved the keys. Update UAMOR -- cgit v1.2.3 From a24204c307962214996627e3f4caa8772b9b0cf4 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:31 +0530 Subject: powerpc/book3s64/pkeys: kill cpu feature key CPU_FTR_PKEY We don't use CPU_FTR_PKEY anymore. Remove the feature bit and mark it free. 
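Editor's aside before moving on: the key-initialisation rework a few patches above is mostly mask arithmetic, and it can help to see it in isolation. The following is a rough, standalone C model of how default_amr/default_iamr/default_uamor and the allocation masks are derived; the constants and key numbers (key 0 special, key 1 reserved by the ISA, key 2 execute-only) mirror the patches, while pkeys_total and num_pkey are invented example inputs. This is not kernel code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define AMR_BITS_PER_PKEY	2
#define PKEY_REG_BITS		(sizeof(uint64_t) * 8)
#define pkeyshift(pkey)		(PKEY_REG_BITS - ((pkey) + 1) * AMR_BITS_PER_PKEY)

int main(void)
{
	int pkeys_total = 32;		/* assumed: reported by the device tree */
	int num_pkey = 8;		/* assumed: a 4K-page kernel manages only 8 */
	int execute_only_key = 2;
	uint64_t default_amr   = ~UINT64_C(0);			/* deny read/write for every key */
	uint64_t default_iamr  = UINT64_C(0x5555555555555555);	/* deny execute for every key */
	uint64_t default_uamor = ~UINT64_C(0);			/* every key user-modifiable */
	uint32_t reserved_allocation_mask = 0, initial_allocation_mask;
	int i;

	/* execute-only key: read/write stay denied (AMR untouched), execute
	 * allowed, UAMOR bits cleared, key hidden from pkey_alloc() */
	reserved_allocation_mask |= UINT32_C(1) << execute_only_key;
	default_iamr  &= ~(UINT64_C(1) << pkeyshift(execute_only_key));
	default_uamor &= ~(UINT64_C(3) << pkeyshift(execute_only_key));

	/* key 0: full access, pre-allocated, not user-modifiable */
	default_amr   &= ~(UINT64_C(3) << pkeyshift(0));
	default_iamr  &= ~(UINT64_C(1) << pkeyshift(0));
	default_uamor &= ~(UINT64_C(3) << pkeyshift(0));
	initial_allocation_mask = UINT32_C(1) << 0;

	/* key 1: reserved per the ISA programming note */
	reserved_allocation_mask |= UINT32_C(1) << 1;
	default_uamor &= ~(UINT64_C(3) << pkeyshift(1));

	/* keys the OS cannot manage are reserved and locked out via UAMOR */
	for (i = num_pkey; i < pkeys_total; i++) {
		reserved_allocation_mask |= UINT32_C(1) << i;
		default_uamor &= ~(UINT64_C(3) << pkeyshift(i));
	}
	initial_allocation_mask |= reserved_allocation_mask;

	printf("AMR   %016" PRIx64 "\nIAMR  %016" PRIx64 "\nUAMOR %016" PRIx64 "\n",
	       default_amr, default_iamr, default_uamor);
	printf("reserved %08" PRIx32 " initial %08" PRIx32 "\n",
	       reserved_allocation_mask, initial_allocation_mask);
	return 0;
}

Compiled with any C compiler, this should print register defaults of the same shape as the values the boot code above programs: UAMOR with bits set only for the keys userspace may actually manage, and AMR/IAMR open only where access is deliberately granted.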
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-9-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/cputable.h | 13 ++++++------- arch/powerpc/kernel/dt_cpu_ftrs.c | 6 ------ 2 files changed, 6 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index bac2252c839e..dd0a2e77a695 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -198,7 +198,7 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0000000080000000) #define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0000000100000000) #define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0000000200000000) -#define CPU_FTR_PKEY LONG_ASM_CONST(0x0000000400000000) +/* LONG_ASM_CONST(0x0000000400000000) Free */ #define CPU_FTR_VMX_COPY LONG_ASM_CONST(0x0000000800000000) #define CPU_FTR_TM LONG_ASM_CONST(0x0000001000000000) #define CPU_FTR_CFAR LONG_ASM_CONST(0x0000002000000000) @@ -438,7 +438,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | \ - CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX | CPU_FTR_PKEY) + CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX ) #define CPU_FTRS_POWER8 (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ CPU_FTR_MMCRA | CPU_FTR_SMT | \ @@ -448,7 +448,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \ - CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_PKEY) + CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP ) #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG) #define CPU_FTRS_POWER9 (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ @@ -459,8 +459,8 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ - CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \ - CPU_FTR_P9_TLBIE_STQ_BUG | CPU_FTR_P9_TLBIE_ERAT_BUG | CPU_FTR_P9_TIDR) + CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_P9_TLBIE_STQ_BUG | \ + CPU_FTR_P9_TLBIE_ERAT_BUG | CPU_FTR_P9_TIDR) #define CPU_FTRS_POWER9_DD2_0 (CPU_FTRS_POWER9 | CPU_FTR_P9_RADIX_PREFETCH_BUG) #define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | \ CPU_FTR_P9_RADIX_PREFETCH_BUG | \ @@ -477,8 +477,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ - CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \ - CPU_FTR_ARCH_31) + CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_ARCH_31) #define CPU_FTRS_CELL (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index ac650c233cd9..554bec785f6a 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -776,12 +776,6 @@ static __init void cpufeatures_cpu_quirks(void) } update_tlbie_feature_flag(version); - /* - * PKEY was not in the initial 
base or feature node - * specification, but it should become optional in the next - * cpu feature version sequence. - */ - cur_cpu_spec->cpu_features |= CPU_FTR_PKEY; } static void __init cpufeatures_setup_finished(void) -- cgit v1.2.3 From a4678d4b477c3d2901f101986ca01406f3b7eaea Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:32 +0530 Subject: powerpc/book3s64/pkeys: Simplify pkey disable branch Make the default value FALSE (pkey enabled) and set to TRUE when we find the total number of keys supported to be zero. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-10-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/pkeys.h | 2 +- arch/powerpc/mm/book3s64/pkeys.c | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h index d06ec0948964..f984bfac814a 100644 --- a/arch/powerpc/include/asm/pkeys.h +++ b/arch/powerpc/include/asm/pkeys.h @@ -11,7 +11,7 @@ #include #include -DECLARE_STATIC_KEY_TRUE(pkey_disabled); +DECLARE_STATIC_KEY_FALSE(pkey_disabled); extern int pkeys_total; /* total pkeys as per device tree */ extern u32 initial_allocation_mask; /* bits set for the initially allocated keys */ extern u32 reserved_allocation_mask; /* bits set for reserved keys */ diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index e2bf93901bd0..8bdb56ce8b43 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -12,7 +12,7 @@ #include #include -DEFINE_STATIC_KEY_TRUE(pkey_disabled); +DEFINE_STATIC_KEY_FALSE(pkey_disabled); int pkeys_total; /* Total pkeys as per device tree */ u32 initial_allocation_mask; /* Bits set for the initially allocated keys */ /* @@ -113,9 +113,8 @@ static int pkey_initialize(void) /* scan the device tree for pkey feature */ pkeys_total = scan_pkey_feature(); - if (pkeys_total) - static_branch_disable(&pkey_disabled); - else { + if (!pkeys_total) { + /* No support for pkey. Mark it disabled */ static_branch_enable(&pkey_disabled); return 0; } -- cgit v1.2.3 From c529afd7cbc71ae1dc44a31efc7c1c9db3c3a143 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:33 +0530 Subject: powerpc/book3s64/pkeys: Convert pkey_total to num_pkey num_pkey now represents max number of keys supported such that we return to userspace 0 - num_pkey - 1. 
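To make the 0 to num_pkey - 1 numbering concrete, here is a hedged userspace sketch of how these keys end up being consumed. It assumes a pkey-capable kernel and the glibc (2.27 or later) wrappers pkey_alloc()/pkey_mprotect()/pkey_set(); it is illustrative only and not part of the patch.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	int pkey = pkey_alloc(0, 0);	/* any free key; always < arch_max_pkey() */
	if (pkey < 0) {
		perror("pkey_alloc");	/* e.g. pkeys not supported or none free */
		return 1;
	}
	printf("got pkey %d\n", pkey);

	if (pkey_mprotect(p, page, PROT_READ | PROT_WRITE, pkey)) {
		perror("pkey_mprotect");
		return 1;
	}

	pkey_set(pkey, PKEY_DISABLE_WRITE);	/* flips this key's AMR bits */
	/* a store through p here would fault with SEGV_PKUERR */
	pkey_set(pkey, 0);			/* restore full access */
	memset(p, 0, page);

	pkey_free(pkey);
	munmap(p, page);
	return 0;
}

The key number handed back is one of the 0 to num_pkey - 1 values described above (excluding the reserved ones), which is why the series is careful to mark everything past num_pkey as reserved.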
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-11-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/pkeys.h | 7 +++++-- arch/powerpc/mm/book3s64/pkeys.c | 14 +++++++------- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h index f984bfac814a..26b20061512f 100644 --- a/arch/powerpc/include/asm/pkeys.h +++ b/arch/powerpc/include/asm/pkeys.h @@ -12,7 +12,7 @@ #include DECLARE_STATIC_KEY_FALSE(pkey_disabled); -extern int pkeys_total; /* total pkeys as per device tree */ +extern int num_pkey; extern u32 initial_allocation_mask; /* bits set for the initially allocated keys */ extern u32 reserved_allocation_mask; /* bits set for reserved keys */ @@ -44,7 +44,10 @@ static inline int vma_pkey(struct vm_area_struct *vma) return (vma->vm_flags & ARCH_VM_PKEY_FLAGS) >> VM_PKEY_SHIFT; } -#define arch_max_pkey() pkeys_total +static inline int arch_max_pkey(void) +{ + return num_pkey; +} #define pkey_alloc_mask(pkey) (0x1 << pkey) diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index 8bdb56ce8b43..65926c00c8a6 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -13,7 +13,7 @@ #include DEFINE_STATIC_KEY_FALSE(pkey_disabled); -int pkeys_total; /* Total pkeys as per device tree */ +int num_pkey; /* Max number of pkeys supported */ u32 initial_allocation_mask; /* Bits set for the initially allocated keys */ /* * Keys marked in the reservation list cannot be allocated by userspace @@ -93,7 +93,7 @@ static int scan_pkey_feature(void) static int pkey_initialize(void) { - int os_reserved, i; + int pkeys_total, i; /* * We define PKEY_DISABLE_EXECUTE in addition to the arch-neutral @@ -133,12 +133,12 @@ static int pkey_initialize(void) * The OS can manage only 8 pkeys due to its inability to represent them * in the Linux 4K PTE. Mark all other keys reserved. */ - os_reserved = pkeys_total - 8; + num_pkey = min(8, pkeys_total); #else - os_reserved = 0; + num_pkey = pkeys_total; #endif - if (unlikely((pkeys_total - os_reserved) <= execute_only_key)) { + if (unlikely(num_pkey <= execute_only_key)) { /* * Insufficient number of keys to support * execute only key. Mark it unavailable. @@ -185,10 +185,10 @@ static int pkey_initialize(void) default_uamor &= ~(0x3ul << pkeyshift(1)); /* - * Prevent the usage of OS reserved the keys. Update UAMOR + * Prevent the usage of OS reserved keys. Update UAMOR * for those keys. */ - for (i = (pkeys_total - os_reserved); i < pkeys_total; i++) { + for (i = num_pkey; i < pkeys_total; i++) { reserved_allocation_mask |= (0x1 << i); default_uamor &= ~(0x3ul << pkeyshift(i)); } -- cgit v1.2.3 From 3c8ab47362fe9a74f61b48efe957666a423c55a2 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:34 +0530 Subject: powerpc/book3s64/pkeys: Make initial_allocation_mask static initial_allocation_mask is not used outside this file. 
Also mark reserved_allocation_mask and initial_allocation_mask __ro_after_init; Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-12-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/pkeys.h | 1 - arch/powerpc/mm/book3s64/pkeys.c | 7 +++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h index 26b20061512f..dd32e30f6767 100644 --- a/arch/powerpc/include/asm/pkeys.h +++ b/arch/powerpc/include/asm/pkeys.h @@ -13,7 +13,6 @@ DECLARE_STATIC_KEY_FALSE(pkey_disabled); extern int num_pkey; -extern u32 initial_allocation_mask; /* bits set for the initially allocated keys */ extern u32 reserved_allocation_mask; /* bits set for reserved keys */ #define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | \ diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index 65926c00c8a6..e9fd0db7fb3c 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -14,11 +14,14 @@ DEFINE_STATIC_KEY_FALSE(pkey_disabled); int num_pkey; /* Max number of pkeys supported */ -u32 initial_allocation_mask; /* Bits set for the initially allocated keys */ /* * Keys marked in the reservation list cannot be allocated by userspace */ -u32 reserved_allocation_mask; +u32 reserved_allocation_mask __ro_after_init; + +/* Bits set for the initially allocated keys */ +static u32 initial_allocation_mask __ro_after_init; + static bool pkey_execute_disable_supported; /* * Even if we allocate keys with sys_pkey_alloc(), we need to make sure -- cgit v1.2.3 From 3e4352aeb8b17eb1040ba288f586620e8294389d Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:35 +0530 Subject: powerpc/book3s64/pkeys: Mark all the pkeys above max pkey as reserved The hypervisor can return less than max allowed pkey (for ex: 31) instead of 32. We should mark all the pkeys above max allowed as reserved so that we avoid the allocation of the wrong pkey(for ex: key 31 in the above case) by userspace. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-13-aneesh.kumar@linux.ibm.com --- arch/powerpc/mm/book3s64/pkeys.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index e9fd0db7fb3c..d84bf55369a3 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -189,9 +189,10 @@ static int pkey_initialize(void) /* * Prevent the usage of OS reserved keys. Update UAMOR - * for those keys. + * for those keys. Also mark the rest of the bits in the + * 32 bit mask as reserved. */ - for (i = num_pkey; i < pkeys_total; i++) { + for (i = num_pkey; i < 32 ; i++) { reserved_allocation_mask |= (0x1 << i); default_uamor &= ~(0x3ul << pkeyshift(i)); } -- cgit v1.2.3 From d3cd91fb8d2e202cf8ebb6f271898aaf37ecda8f Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:36 +0530 Subject: powerpc/book3s64/pkeys: Add MMU_FTR_PKEY Parse storage keys related device tree entry in early_init_devtree and enable MMU feature MMU_FTR_PKEY if pkeys are supported. MMU feature is used instead of CPU feature because this enables us to group MMU_FTR_KUAP and MMU_FTR_PKEY in asm feature fixup code. 
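For illustration, the flattened-device-tree scan this patch adds boils down to decoding one big-endian cell of the "ibm,processor-storage-keys" property and latching a feature bit. A standalone model (not kernel code; the property bytes are invented, the MMU_FTR_PKEY value mirrors the diff below) might look like this:

#include <stdint.h>
#include <stdio.h>

#define MMU_FTR_PKEY	0x00000800u	/* value from asm/mmu.h in this patch */

/* FDT property cells are big-endian on the wire */
static uint32_t be32_to_cpu(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	/* invented raw property value: <32 0>, i.e. 32 data keys */
	const uint8_t prop[8] = { 0x00, 0x00, 0x00, 0x20, 0, 0, 0, 0 };
	uint32_t mmu_features = 0;	/* stand-in for cur_cpu_spec->mmu_features */

	uint32_t pkeys_total = be32_to_cpu(prop);
	if (pkeys_total)
		mmu_features |= MMU_FTR_PKEY;

	printf("pkeys_total=%u mmu_features=%#x\n", pkeys_total, mmu_features);
	return 0;
}

Only the first cell is consulted, matching the existing decision to treat all keys as equal rather than distinguishing data and instruction keys.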
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-14-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/book3s/64/mmu.h | 6 ++++ arch/powerpc/include/asm/mmu.h | 10 ++++++ arch/powerpc/kernel/prom.c | 5 +++ arch/powerpc/mm/book3s64/pkeys.c | 52 ++++++++++++++++++-------------- 4 files changed, 51 insertions(+), 22 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 15aae924f41c..55442d45c597 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -214,6 +214,12 @@ extern int mmu_io_psize; void mmu_early_init_devtree(void); void hash__early_init_devtree(void); void radix__early_init_devtree(void); +#ifdef CONFIG_PPC_MEM_KEYS +void pkey_early_init_devtree(void); +#else +static inline void pkey_early_init_devtree(void) {} +#endif + extern void hash__early_init_mmu(void); extern void radix__early_init_mmu(void); static inline void __init early_init_mmu(void) diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 884d51995934..17cbaf132631 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -28,6 +28,11 @@ * Individual features below. */ +/* + * Support for memory protection keys. + */ +#define MMU_FTR_PKEY ASM_CONST(0x00000800) + /* Guest Translation Shootdown Enable */ #define MMU_FTR_GTSE ASM_CONST(0x00001000) @@ -181,6 +186,9 @@ enum { MMU_FTR_RADIX_KUAP | #endif /* CONFIG_PPC_KUAP */ #endif /* CONFIG_PPC_RADIX_MMU */ +#ifdef CONFIG_PPC_MEM_KEYS + MMU_FTR_PKEY | +#endif 0, }; @@ -360,6 +368,8 @@ extern void setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size); static inline void mmu_early_init_devtree(void) { } +static inline void pkey_early_init_devtree(void) {} + extern void *abatron_pteptrs[2]; #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 033d43819ed8..dfe0103ad131 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -815,6 +815,11 @@ void __init early_init_devtree(void *params) /* Now try to figure out if we are running on LPAR and so on */ pseries_probe_fw_features(); + /* + * Initialize pkey features and default AMR/IAMR values + */ + pkey_early_init_devtree(); + #ifdef CONFIG_PPC_PS3 /* Identify PS3 firmware */ if (of_flat_dt_is_compatible(of_get_flat_dt_root(), "sony,ps3")) diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index d84bf55369a3..028b17c2c511 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -10,7 +10,8 @@ #include #include #include -#include +#include + DEFINE_STATIC_KEY_FALSE(pkey_disabled); int num_pkey; /* Max number of pkeys supported */ @@ -46,31 +47,38 @@ static int execute_only_key = 2; #define PKEY_REG_BITS (sizeof(u64) * 8) #define pkeyshift(pkey) (PKEY_REG_BITS - ((pkey+1) * AMR_BITS_PER_PKEY)) +static int __init dt_scan_storage_keys(unsigned long node, + const char *uname, int depth, + void *data) +{ + const char *type = of_get_flat_dt_prop(node, "device_type", NULL); + const __be32 *prop; + int *pkeys_total = (int *) data; + + /* We are scanning "cpu" nodes only */ + if (type == NULL || strcmp(type, "cpu") != 0) + return 0; + + prop = of_get_flat_dt_prop(node, "ibm,processor-storage-keys", NULL); + if (!prop) + return 0; + *pkeys_total = be32_to_cpu(prop[0]); + return 1; +} + static int scan_pkey_feature(void) { - u32 
vals[2]; + int ret; int pkeys_total = 0; - struct device_node *cpu; /* * Pkey is not supported with Radix translation. */ - if (radix_enabled()) - return 0; - - cpu = of_find_node_by_type(NULL, "cpu"); - if (!cpu) + if (early_radix_enabled()) return 0; - if (of_property_read_u32_array(cpu, - "ibm,processor-storage-keys", vals, 2) == 0) { - /* - * Since any pkey can be used for data or execute, we will - * just treat all keys as equal and track them as one entity. - */ - pkeys_total = vals[0]; - } else { - + ret = of_scan_flat_dt(dt_scan_storage_keys, &pkeys_total); + if (ret == 0) { /* * Let's assume 32 pkeys on P8/P9 bare metal, if its not defined by device * tree. We make this exception since some version of skiboot forgot to @@ -94,7 +102,7 @@ static int scan_pkey_feature(void) return pkeys_total; } -static int pkey_initialize(void) +void __init pkey_early_init_devtree(void) { int pkeys_total, i; @@ -119,9 +127,11 @@ static int pkey_initialize(void) if (!pkeys_total) { /* No support for pkey. Mark it disabled */ static_branch_enable(&pkey_disabled); - return 0; + return; } + cur_cpu_spec->mmu_features |= MMU_FTR_PKEY; + /* * The device tree cannot be relied to indicate support for * execute_disable support. Instead we use a PVR check. @@ -201,11 +211,9 @@ static int pkey_initialize(void) */ initial_allocation_mask |= reserved_allocation_mask; - return 0; + return; } -arch_initcall(pkey_initialize); - void pkey_mm_init(struct mm_struct *mm) { if (static_branch_likely(&pkey_disabled)) -- cgit v1.2.3 From e10cc8715d180509a367d3ab25d40e4a1612cb2f Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:37 +0530 Subject: powerpc/book3s64/kuep: Add MMU_FTR_KUEP This will be used to enable/disable Kernel Userspace Execution Prevention (KUEP). Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-15-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/mmu.h | 9 +++++++++ arch/powerpc/mm/book3s64/radix_pgtable.c | 4 +++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 17cbaf132631..255a1837e9f7 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -28,6 +28,11 @@ * Individual features below. */ +/* + * Support for KUEP feature. + */ +#define MMU_FTR_KUEP ASM_CONST(0x00000400) + /* * Support for memory protection keys. */ @@ -189,6 +194,10 @@ enum { #ifdef CONFIG_PPC_MEM_KEYS MMU_FTR_PKEY | #endif +#ifdef CONFIG_PPC_KUEP + MMU_FTR_KUEP | +#endif /* CONFIG_PPC_KUAP */ + 0, }; diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index c5bf2ef73c36..3536675dd61e 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -593,8 +593,10 @@ void setup_kuep(bool disabled) if (disabled || !early_radix_enabled()) return; - if (smp_processor_id() == boot_cpuid) + if (smp_processor_id() == boot_cpuid) { pr_info("Activating Kernel Userspace Execution Prevention\n"); + cur_cpu_spec->mmu_features |= MMU_FTR_KUEP; + } /* * Radix always uses key0 of the IAMR to determine if an access is -- cgit v1.2.3 From 2daf298de728dc37f32d0749fa4f59db36fa7d96 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:38 +0530 Subject: powerpc/book3s64/pkeys: Use pkey_execute_disable_supported Use pkey_execute_disable_supported to check for execute key support instead of pkey_disabled. 
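The execute-only key whose availability this check now gates is what allows PROT_EXEC-only mappings to actually be execute-only. As a rough userspace illustration (it assumes a kernel where the execute-only pkey is available; the byte written is a placeholder, not real code):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	unsigned char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	buf[0] = 0x60;	/* pretend we emitted an instruction here */

	/*
	 * Ask for execute-only. When an execute-only pkey is available the
	 * kernel tags the VMA with it, so even plain loads from the region
	 * are expected to fault (SIGSEGV with si_code SEGV_PKUERR).
	 */
	if (mprotect(buf, page, PROT_EXEC)) {
		perror("mprotect");
		return 1;
	}
	printf("mapping at %p is now execute-only\n", (void *)buf);

	/* reading buf[0] here would be the faulting access */

	munmap(buf, page);
	return 0;
}

Without pkey support, or without a spare key, PROT_EXEC typically still implies readability, which is why the series keeps the execute_only_key = -1 fallback.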
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-16-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/pkeys.h | 10 +--------- arch/powerpc/mm/book3s64/pkeys.c | 6 +++--- 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h index dd32e30f6767..f44a14d64d47 100644 --- a/arch/powerpc/include/asm/pkeys.h +++ b/arch/powerpc/include/asm/pkeys.h @@ -126,15 +126,7 @@ static inline int mm_pkey_free(struct mm_struct *mm, int pkey) * Try to dedicate one of the protection keys to be used as an * execute-only protection key. */ -extern int __execute_only_pkey(struct mm_struct *mm); -static inline int execute_only_pkey(struct mm_struct *mm) -{ - if (static_branch_likely(&pkey_disabled)) - return -1; - - return __execute_only_pkey(mm); -} - +extern int execute_only_pkey(struct mm_struct *mm); extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey); static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma, diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index 028b17c2c511..9969bc82e2ce 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -23,7 +23,6 @@ u32 reserved_allocation_mask __ro_after_init; /* Bits set for the initially allocated keys */ static u32 initial_allocation_mask __ro_after_init; -static bool pkey_execute_disable_supported; /* * Even if we allocate keys with sys_pkey_alloc(), we need to make sure * other thread still find the access denied using the same keys. @@ -38,6 +37,7 @@ static u64 default_uamor = ~0x0UL; * We pick key 2 because 0 is special key and 1 is reserved as per ISA. */ static int execute_only_key = 2; +static bool pkey_execute_disable_supported; #define AMR_BITS_PER_PKEY 2 @@ -151,7 +151,7 @@ void __init pkey_early_init_devtree(void) num_pkey = pkeys_total; #endif - if (unlikely(num_pkey <= execute_only_key)) { + if (unlikely(num_pkey <= execute_only_key) || !pkey_execute_disable_supported) { /* * Insufficient number of keys to support * execute only key. Mark it unavailable. @@ -359,7 +359,7 @@ void thread_pkey_regs_init(struct thread_struct *thread) write_uamor(default_uamor); } -int __execute_only_pkey(struct mm_struct *mm) +int execute_only_pkey(struct mm_struct *mm) { return mm->context.execute_only_pkey; } -- cgit v1.2.3 From f7045a45115b17fe695ea7075f5213706f202edb Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:39 +0530 Subject: powerpc/book3s64/pkeys: Use MMU_FTR_PKEY instead of pkey_disabled static key Instead of pkey_disabled static key use mmu feature MMU_FTR_PKEY. 
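A brief note on the pattern being adopted here: mmu_has_feature() tests a feature bit that pkey_early_init_devtree() sets, and the kernel's feature-fixup machinery keeps that test cheap on hot paths. A minimal userspace model of the guard this patch installs follows; the MMU_FTR_PKEY value mirrors the diffs above, while the vma_pkey() mask and shift are invented for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MMU_FTR_PKEY	0x00000800u

static uint32_t cur_mmu_features;	/* stand-in for cur_cpu_spec->mmu_features */

static bool mmu_has_feature(uint32_t ftr)
{
	return (cur_mmu_features & ftr) != 0;
}

/* illustrative stand-in for vma_pkey(): mask and shift are made up */
static int vma_pkey(uint64_t vm_flags, uint64_t pkey_mask, int pkey_shift)
{
	if (!mmu_has_feature(MMU_FTR_PKEY))	/* replaces the pkey_disabled check */
		return 0;
	return (int)((vm_flags & pkey_mask) >> pkey_shift);
}

int main(void)
{
	uint64_t flags = UINT64_C(0x5) << 32;	/* pretend VM_PKEY bits encode key 5 */
	uint64_t mask  = UINT64_C(0x1f) << 32;

	printf("before feature init: pkey=%d\n", vma_pkey(flags, mask, 32));
	cur_mmu_features |= MMU_FTR_PKEY;	/* what pkey_early_init_devtree() does */
	printf("after  feature init: pkey=%d\n", vma_pkey(flags, mask, 32));
	return 0;
}

The observable behaviour is the same as with the static key, but the enable decision now lives with the rest of the MMU feature bits, which is what lets it be grouped with the KUAP/KUEP handling mentioned earlier in the series.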
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-17-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/book3s/64/pkeys.h | 2 +- arch/powerpc/include/asm/pkeys.h | 14 ++++++-------- arch/powerpc/mm/book3s64/pkeys.c | 17 +++++++---------- 3 files changed, 14 insertions(+), 19 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/pkeys.h b/arch/powerpc/include/asm/book3s/64/pkeys.h index 8174662a9173..5b178139f3c0 100644 --- a/arch/powerpc/include/asm/book3s/64/pkeys.h +++ b/arch/powerpc/include/asm/book3s/64/pkeys.h @@ -7,7 +7,7 @@ static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags) { - if (static_branch_likely(&pkey_disabled)) + if (!mmu_has_feature(MMU_FTR_PKEY)) return 0x0UL; if (radix_enabled()) diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h index f44a14d64d47..a7951049e129 100644 --- a/arch/powerpc/include/asm/pkeys.h +++ b/arch/powerpc/include/asm/pkeys.h @@ -11,7 +11,6 @@ #include #include -DECLARE_STATIC_KEY_FALSE(pkey_disabled); extern int num_pkey; extern u32 reserved_allocation_mask; /* bits set for reserved keys */ @@ -38,7 +37,7 @@ static inline u64 pkey_to_vmflag_bits(u16 pkey) static inline int vma_pkey(struct vm_area_struct *vma) { - if (static_branch_likely(&pkey_disabled)) + if (!mmu_has_feature(MMU_FTR_PKEY)) return 0; return (vma->vm_flags & ARCH_VM_PKEY_FLAGS) >> VM_PKEY_SHIFT; } @@ -93,9 +92,8 @@ static inline int mm_pkey_alloc(struct mm_struct *mm) u32 all_pkeys_mask = (u32)(~(0x0)); int ret; - if (static_branch_likely(&pkey_disabled)) + if (!mmu_has_feature(MMU_FTR_PKEY)) return -1; - /* * Are we out of pkeys? We must handle this specially because ffz() * behavior is undefined if there are no zeros. @@ -111,7 +109,7 @@ static inline int mm_pkey_alloc(struct mm_struct *mm) static inline int mm_pkey_free(struct mm_struct *mm, int pkey) { - if (static_branch_likely(&pkey_disabled)) + if (!mmu_has_feature(MMU_FTR_PKEY)) return -1; if (!mm_pkey_is_allocated(mm, pkey)) @@ -132,7 +130,7 @@ extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma, static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey) { - if (static_branch_likely(&pkey_disabled)) + if (!mmu_has_feature(MMU_FTR_PKEY)) return 0; /* @@ -150,7 +148,7 @@ extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey, static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long init_val) { - if (static_branch_likely(&pkey_disabled)) + if (!mmu_has_feature(MMU_FTR_PKEY)) return -EINVAL; /* @@ -167,7 +165,7 @@ static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, static inline bool arch_pkeys_enabled(void) { - return !static_branch_likely(&pkey_disabled); + return mmu_has_feature(MMU_FTR_PKEY); } extern void pkey_mm_init(struct mm_struct *mm); diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index 9969bc82e2ce..2c1bddccd67b 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -12,8 +12,6 @@ #include #include - -DEFINE_STATIC_KEY_FALSE(pkey_disabled); int num_pkey; /* Max number of pkeys supported */ /* * Keys marked in the reservation list cannot be allocated by userspace @@ -126,7 +124,6 @@ void __init pkey_early_init_devtree(void) pkeys_total = scan_pkey_feature(); if (!pkeys_total) { /* No support for pkey. 
Mark it disabled */ - static_branch_enable(&pkey_disabled); return; } @@ -216,7 +213,7 @@ void __init pkey_early_init_devtree(void) void pkey_mm_init(struct mm_struct *mm) { - if (static_branch_likely(&pkey_disabled)) + if (!mmu_has_feature(MMU_FTR_PKEY)) return; mm_pkey_allocation_map(mm) = initial_allocation_mask; mm->context.execute_only_pkey = execute_only_key; @@ -320,7 +317,7 @@ int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey, void thread_pkey_regs_save(struct thread_struct *thread) { - if (static_branch_likely(&pkey_disabled)) + if (!mmu_has_feature(MMU_FTR_PKEY)) return; /* @@ -334,7 +331,7 @@ void thread_pkey_regs_save(struct thread_struct *thread) void thread_pkey_regs_restore(struct thread_struct *new_thread, struct thread_struct *old_thread) { - if (static_branch_likely(&pkey_disabled)) + if (!mmu_has_feature(MMU_FTR_PKEY)) return; if (old_thread->amr != new_thread->amr) @@ -347,7 +344,7 @@ void thread_pkey_regs_restore(struct thread_struct *new_thread, void thread_pkey_regs_init(struct thread_struct *thread) { - if (static_branch_likely(&pkey_disabled)) + if (!mmu_has_feature(MMU_FTR_PKEY)) return; thread->amr = default_amr; @@ -418,7 +415,7 @@ static bool pkey_access_permitted(int pkey, bool write, bool execute) bool arch_pte_access_permitted(u64 pte, bool write, bool execute) { - if (static_branch_likely(&pkey_disabled)) + if (!mmu_has_feature(MMU_FTR_PKEY)) return true; return pkey_access_permitted(pte_to_pkey_bits(pte), write, execute); @@ -435,7 +432,7 @@ bool arch_pte_access_permitted(u64 pte, bool write, bool execute) bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write, bool execute, bool foreign) { - if (static_branch_likely(&pkey_disabled)) + if (!mmu_has_feature(MMU_FTR_PKEY)) return true; /* * Do not enforce our key-permissions on a foreign vma. @@ -448,7 +445,7 @@ bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write, void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm) { - if (static_branch_likely(&pkey_disabled)) + if (!mmu_has_feature(MMU_FTR_PKEY)) return; /* Duplicate the oldmm pkey state in mm: */ -- cgit v1.2.3 From 7cdd3745f2d75aecc2b61368e2563ae54bfac59a Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:40 +0530 Subject: powerpc/book3s64/keys: Print information during boot. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-18-aneesh.kumar@linux.ibm.com --- arch/powerpc/mm/book3s64/pkeys.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index 2c1bddccd67b..e776544a4d11 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -208,6 +208,7 @@ void __init pkey_early_init_devtree(void) */ initial_allocation_mask |= reserved_allocation_mask; + pr_info("Enabling pkeys with max key count %d\n", num_pkey); return; } -- cgit v1.2.3 From 000a42b35a54372597f0657f6b9875b38c641864 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:41 +0530 Subject: powerpc/book3s64/keys/kuap: Reset AMR/IAMR values on kexec As we kexec across kernels that use AMR/IAMR for different purposes we need to ensure that new kernels get kexec'd with a reset value of AMR/IAMR. For ex: the new kernel can use key 0 for kernel mapping and the old AMR value prevents access to key 0. This patch also removes reset if IAMR and AMOR in kexec_sequence. 
Reset of AMOR is not needed and the IAMR reset is partial (it doesn't do the reset on secondary cpus) and is redundant with this patch. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-19-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/book3s/64/kexec.h | 23 +++++++++++++++++++++++ arch/powerpc/include/asm/kexec.h | 12 ++++++++++++ arch/powerpc/kernel/misc_64.S | 14 -------------- arch/powerpc/kexec/core_64.c | 2 ++ arch/powerpc/mm/book3s64/pgtable.c | 3 +++ 5 files changed, 40 insertions(+), 14 deletions(-) create mode 100644 arch/powerpc/include/asm/book3s/64/kexec.h diff --git a/arch/powerpc/include/asm/book3s/64/kexec.h b/arch/powerpc/include/asm/book3s/64/kexec.h new file mode 100644 index 000000000000..6b5c3a248ba2 --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/kexec.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_POWERPC_BOOK3S_64_KEXEC_H_ +#define _ASM_POWERPC_BOOK3S_64_KEXEC_H_ + + +#define reset_sprs reset_sprs +static inline void reset_sprs(void) +{ + if (cpu_has_feature(CPU_FTR_ARCH_206)) { + mtspr(SPRN_AMR, 0); + mtspr(SPRN_UAMOR, 0); + } + + if (cpu_has_feature(CPU_FTR_ARCH_207S)) { + mtspr(SPRN_IAMR, 0); + } + + /* Do we need isync()? We are going via a kexec reset */ + isync(); +} + +#endif diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index c68476818753..89f7e3462292 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -150,6 +150,18 @@ static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) } #endif /* CONFIG_KEXEC_CORE */ + +#ifdef CONFIG_PPC_BOOK3S_64 +#include +#endif + +#ifndef reset_sprs +#define reset_sprs reset_sprs +static inline void reset_sprs(void) +{ +} +#endif + #endif /* ! __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_KEXEC_H */ diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 1864605eca29..7bb46ad98207 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -413,20 +413,6 @@ _GLOBAL(kexec_sequence) li r0,0 std r0,16(r1) -BEGIN_FTR_SECTION - /* - * This is the best time to turn AMR/IAMR off. - * key 0 is used in radix for supervisor<->user - * protection, but on hash key 0 is reserved - * ideally we want to enter with a clean state. - * NOTE, we rely on r0 being 0 from above. - */ - mtspr SPRN_IAMR,r0 -BEGIN_FTR_SECTION_NESTED(42) - mtspr SPRN_AMOR,r0 -END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42) -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) - /* save regs for local vars on new stack. * yes, we won't go back, but ... 
*/ diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c index b4184092172a..8a449b2d8715 100644 --- a/arch/powerpc/kexec/core_64.c +++ b/arch/powerpc/kexec/core_64.c @@ -152,6 +152,8 @@ static void kexec_smp_down(void *arg) if (ppc_md.kexec_cpu_down) ppc_md.kexec_cpu_down(0, 1); + reset_sprs(); + kexec_smp_wait(); /* NOTREACHED */ } diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index 85de5b574dd7..e18ae50a275c 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -165,6 +166,8 @@ void mmu_cleanup_all(void) radix__mmu_cleanup_all(); else if (mmu_hash_ops.hpte_clear_all) mmu_hash_ops.hpte_clear_all(); + + reset_sprs(); } #ifdef CONFIG_MEMORY_HOTPLUG -- cgit v1.2.3 From e0d8e991be641ba0034c67785bf86f6c097869d6 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:42 +0530 Subject: powerpc/book3s64/kuap: Move UAMOR setup to key init function UAMOR values are not application-specific. The kernel initializes its value based on different reserved keys. Remove the thread-specific UAMOR value and don't switch the UAMOR on context switch. Move UAMOR initialization to key initialization code and remove thread_struct.uamor because it is not used anymore. Before commit: 4a4a5e5d2aad ("powerpc/pkeys: key allocation/deallocation must not change pkey registers") we used to update uamor based on key allocation and free. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-20-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/book3s/64/pkeys.h | 2 ++ arch/powerpc/include/asm/processor.h | 1 - arch/powerpc/kernel/ptrace/ptrace-view.c | 27 +++++++++++++++++++++------ arch/powerpc/kernel/smp.c | 1 + arch/powerpc/mm/book3s64/hash_utils.c | 4 ++++ arch/powerpc/mm/book3s64/pkeys.c | 29 ++++++++++++----------------- arch/powerpc/mm/book3s64/radix_pgtable.c | 4 ++++ 7 files changed, 44 insertions(+), 24 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/pkeys.h b/arch/powerpc/include/asm/book3s/64/pkeys.h index 5b178139f3c0..b7d9f4267bcd 100644 --- a/arch/powerpc/include/asm/book3s/64/pkeys.h +++ b/arch/powerpc/include/asm/book3s/64/pkeys.h @@ -5,6 +5,8 @@ #include +extern u64 __ro_after_init default_uamor; + static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags) { if (!mmu_has_feature(MMU_FTR_PKEY)) diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 52a67835057a..6ac12168f1fe 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -237,7 +237,6 @@ struct thread_struct { #ifdef CONFIG_PPC_MEM_KEYS unsigned long amr; unsigned long iamr; - unsigned long uamor; #endif #ifdef CONFIG_KVM_BOOK3S_32_HANDLER void* kvm_shadow_vcpu; /* KVM internal data */ diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c index caeb5822a8f4..ac7d480cb9c1 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-view.c +++ b/arch/powerpc/kernel/ptrace/ptrace-view.c @@ -488,14 +488,21 @@ static int pkey_active(struct task_struct *target, const struct user_regset *reg static int pkey_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { + int ret; + BUILD_BUG_ON(TSO(amr) + sizeof(unsigned long) != TSO(iamr)); - BUILD_BUG_ON(TSO(iamr) + sizeof(unsigned long) != TSO(uamor)); if 
(!arch_pkeys_enabled()) return -ENODEV; - return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.amr, - 0, ELF_NPKEY * sizeof(unsigned long)); + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.amr, + 0, 2 * sizeof(unsigned long)); + if (ret) + return ret; + + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &default_uamor, + 2 * sizeof(unsigned long), 3 * sizeof(unsigned long)); + return ret; } static int pkey_set(struct task_struct *target, const struct user_regset *regset, @@ -517,9 +524,17 @@ static int pkey_set(struct task_struct *target, const struct user_regset *regset if (ret) return ret; - /* UAMOR determines which bits of the AMR can be set from userspace. */ - target->thread.amr = (new_amr & target->thread.uamor) | - (target->thread.amr & ~target->thread.uamor); + /* + * UAMOR determines which bits of the AMR can be set from userspace. + * UAMOR value 0b11 indicates that the AMR value can be modified + * from userspace. If the kernel is using a specific key, we avoid + * userspace modifying the AMR value for that key by masking them + * via UAMOR 0b00. + * + * Pick the AMR values for the keys that kernel is using. This + * will be indicated by the ~default_uamor bits. + */ + target->thread.amr = (new_amr & default_uamor) | (target->thread.amr & ~default_uamor); return 0; } diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 73199470c265..8261999c7d52 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -59,6 +59,7 @@ #include #include #include +#include #ifdef DEBUG #include diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index eec6f4e5e481..9dfb0ceed5e3 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -1110,6 +1110,10 @@ void hash__early_init_mmu_secondary(void) if (cpu_has_feature(CPU_FTR_ARCH_206) && cpu_has_feature(CPU_FTR_HVMODE)) tlbiel_all(); + +#ifdef CONFIG_PPC_MEM_KEYS + mtspr(SPRN_UAMOR, default_uamor); +#endif } #endif /* CONFIG_SMP */ diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index e776544a4d11..d45864a77066 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -27,9 +27,7 @@ static u32 initial_allocation_mask __ro_after_init; */ static u64 default_amr = ~0x0UL; static u64 default_iamr = 0x5555555555555555UL; - -/* Allow all keys to be modified by default */ -static u64 default_uamor = ~0x0UL; +u64 default_uamor __ro_after_init; /* * Key used to implement PROT_EXEC mmap. Denies READ/WRITE * We pick key 2 because 0 is special key and 1 is reserved as per ISA. @@ -122,10 +120,11 @@ void __init pkey_early_init_devtree(void) /* scan the device tree for pkey feature */ pkeys_total = scan_pkey_feature(); - if (!pkeys_total) { - /* No support for pkey. 
Mark it disabled */ - return; - } + if (!pkeys_total) + goto out; + + /* Allow all keys to be modified by default */ + default_uamor = ~0x0UL; cur_cpu_spec->mmu_features |= MMU_FTR_PKEY; @@ -209,6 +208,12 @@ void __init pkey_early_init_devtree(void) initial_allocation_mask |= reserved_allocation_mask; pr_info("Enabling pkeys with max key count %d\n", num_pkey); +out: + /* + * Setup uamor on boot cpu + */ + mtspr(SPRN_UAMOR, default_uamor); + return; } @@ -251,11 +256,6 @@ static inline u64 read_uamor(void) return mfspr(SPRN_UAMOR); } -static inline void write_uamor(u64 value) -{ - mtspr(SPRN_UAMOR, value); -} - static bool is_pkey_enabled(int pkey) { u64 uamor = read_uamor(); @@ -326,7 +326,6 @@ void thread_pkey_regs_save(struct thread_struct *thread) */ thread->amr = read_amr(); thread->iamr = read_iamr(); - thread->uamor = read_uamor(); } void thread_pkey_regs_restore(struct thread_struct *new_thread, @@ -339,8 +338,6 @@ void thread_pkey_regs_restore(struct thread_struct *new_thread, write_amr(new_thread->amr); if (old_thread->iamr != new_thread->iamr) write_iamr(new_thread->iamr); - if (old_thread->uamor != new_thread->uamor) - write_uamor(new_thread->uamor); } void thread_pkey_regs_init(struct thread_struct *thread) @@ -350,11 +347,9 @@ void thread_pkey_regs_init(struct thread_struct *thread) thread->amr = default_amr; thread->iamr = default_iamr; - thread->uamor = default_uamor; write_amr(default_amr); write_iamr(default_iamr); - write_uamor(default_uamor); } int execute_only_pkey(struct mm_struct *mm) diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 3536675dd61e..a8f46a686219 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -620,6 +620,10 @@ void setup_kuap(bool disabled) /* Make sure userspace can't change the AMR */ mtspr(SPRN_UAMOR, 0); + + /* + * Set the default kernel AMR values on all cpus. + */ mtspr(SPRN_AMR, AMR_KUAP_BLOCKED); isync(); } -- cgit v1.2.3 From 9a11f12e0a6c374b3ef1ce81e32ce477d28eb1b8 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:43 +0530 Subject: selftests/powerpc: ptrace-pkey: Rename variables to make it easier to follow code Rename variable to indicate that they are invalid values which we will use to test ptrace update of pkeys. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-21-aneesh.kumar@linux.ibm.com --- .../testing/selftests/powerpc/ptrace/ptrace-pkey.c | 26 +++++++++++----------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c index 904c04f8c919..a32a8034f956 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c @@ -44,7 +44,7 @@ struct shared_info { unsigned long amr2; /* AMR value that ptrace should refuse to write to the child. */ - unsigned long amr3; + unsigned long invalid_amr; /* IAMR value the parent expects to read from the child. */ unsigned long expected_iamr; @@ -57,8 +57,8 @@ struct shared_info { * (even though they're valid ones) because userspace doesn't have * access to those registers. 
*/ - unsigned long new_iamr; - unsigned long new_uamor; + unsigned long invalid_iamr; + unsigned long invalid_uamor; }; static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights) @@ -100,7 +100,7 @@ static int child(struct shared_info *info) info->amr1 |= 3ul << pkeyshift(pkey1); info->amr2 |= 3ul << pkeyshift(pkey2); - info->amr3 |= info->amr2 | 3ul << pkeyshift(pkey3); + info->invalid_amr |= info->amr2 | 3ul << pkeyshift(pkey3); if (disable_execute) info->expected_iamr |= 1ul << pkeyshift(pkey1); @@ -111,8 +111,8 @@ static int child(struct shared_info *info) info->expected_uamor |= 3ul << pkeyshift(pkey1) | 3ul << pkeyshift(pkey2); - info->new_iamr |= 1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2); - info->new_uamor |= 3ul << pkeyshift(pkey1); + info->invalid_iamr |= 1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2); + info->invalid_uamor |= 3ul << pkeyshift(pkey1); /* * We won't use pkey3. We just want a plausible but invalid key to test @@ -196,9 +196,9 @@ static int parent(struct shared_info *info, pid_t pid) PARENT_SKIP_IF_UNSUPPORTED(ret, &info->child_sync); PARENT_FAIL_IF(ret, &info->child_sync); - info->amr1 = info->amr2 = info->amr3 = regs[0]; - info->expected_iamr = info->new_iamr = regs[1]; - info->expected_uamor = info->new_uamor = regs[2]; + info->amr1 = info->amr2 = info->invalid_amr = regs[0]; + info->expected_iamr = info->invalid_iamr = regs[1]; + info->expected_uamor = info->invalid_uamor = regs[2]; /* Wake up child so that it can set itself up. */ ret = prod_child(&info->child_sync); @@ -234,10 +234,10 @@ static int parent(struct shared_info *info, pid_t pid) return ret; /* Write invalid AMR value in child. */ - ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->amr3, 1); + ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->invalid_amr, 1); PARENT_FAIL_IF(ret, &info->child_sync); - printf("%-30s AMR: %016lx\n", ptrace_write_running, info->amr3); + printf("%-30s AMR: %016lx\n", ptrace_write_running, info->invalid_amr); /* Wake up child so that it can verify it didn't change. */ ret = prod_child(&info->child_sync); @@ -249,7 +249,7 @@ static int parent(struct shared_info *info, pid_t pid) /* Try to write to IAMR. */ regs[0] = info->amr1; - regs[1] = info->new_iamr; + regs[1] = info->invalid_iamr; ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 2); PARENT_FAIL_IF(!ret, &info->child_sync); @@ -257,7 +257,7 @@ static int parent(struct shared_info *info, pid_t pid) ptrace_write_running, regs[0], regs[1]); /* Try to write to IAMR and UAMOR. 
*/ - regs[2] = info->new_uamor; + regs[2] = info->invalid_uamor; ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 3); PARENT_FAIL_IF(!ret, &info->child_sync); -- cgit v1.2.3 From 0eaa3b5ca7b5a76e3783639c828498343be66a01 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:44 +0530 Subject: selftests/powerpc: ptrace-pkey: Update the test to mark an invalid pkey correctly Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-22-aneesh.kumar@linux.ibm.com --- .../testing/selftests/powerpc/ptrace/ptrace-pkey.c | 30 +++++++++------------- 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c index a32a8034f956..80dc5085a412 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c @@ -66,11 +66,6 @@ static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights) return syscall(__NR_pkey_alloc, flags, init_access_rights); } -static int sys_pkey_free(int pkey) -{ - return syscall(__NR_pkey_free, pkey); -} - static int child(struct shared_info *info) { unsigned long reg; @@ -100,7 +95,11 @@ static int child(struct shared_info *info) info->amr1 |= 3ul << pkeyshift(pkey1); info->amr2 |= 3ul << pkeyshift(pkey2); - info->invalid_amr |= info->amr2 | 3ul << pkeyshift(pkey3); + /* + * invalid amr value where we try to force write + * things which are deined by a uamor setting. + */ + info->invalid_amr = info->amr2 | (~0x0UL & ~info->expected_uamor); if (disable_execute) info->expected_iamr |= 1ul << pkeyshift(pkey1); @@ -111,17 +110,12 @@ static int child(struct shared_info *info) info->expected_uamor |= 3ul << pkeyshift(pkey1) | 3ul << pkeyshift(pkey2); - info->invalid_iamr |= 1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2); - info->invalid_uamor |= 3ul << pkeyshift(pkey1); - /* - * We won't use pkey3. We just want a plausible but invalid key to test - * whether ptrace will let us write to AMR bits we are not supposed to. - * - * This also tests whether the kernel restores the UAMOR permissions - * after a key is freed. + * Create an IAMR value different from expected value. + * Kernel will reject an IAMR and UAMOR change. */ - sys_pkey_free(pkey3); + info->invalid_iamr = info->expected_iamr | (1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2)); + info->invalid_uamor = info->expected_uamor & ~(0x3ul << pkeyshift(pkey1)); printf("%-30s AMR: %016lx pkey1: %d pkey2: %d pkey3: %d\n", user_write, info->amr1, pkey1, pkey2, pkey3); @@ -196,9 +190,9 @@ static int parent(struct shared_info *info, pid_t pid) PARENT_SKIP_IF_UNSUPPORTED(ret, &info->child_sync); PARENT_FAIL_IF(ret, &info->child_sync); - info->amr1 = info->amr2 = info->invalid_amr = regs[0]; - info->expected_iamr = info->invalid_iamr = regs[1]; - info->expected_uamor = info->invalid_uamor = regs[2]; + info->amr1 = info->amr2 = regs[0]; + info->expected_iamr = regs[1]; + info->expected_uamor = regs[2]; /* Wake up child so that it can set itself up. */ ret = prod_child(&info->child_sync); -- cgit v1.2.3 From 3563b9bea0ca7f53e4218b5e268550341a49f333 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:45 +0530 Subject: selftests/powerpc: ptrace-pkey: Don't update expected UAMOR value With commit 4a4a5e5d2aad ("powerpc/pkeys: key allocation/deallocation must not change pkey registers") we are not updating UAMOR on key allocation. 
So don't update the expected uamor value in the test. Fixes: 4a4a5e5d2aad ("powerpc/pkeys: key allocation/deallocation must not change pkey registers") Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-23-aneesh.kumar@linux.ibm.com --- tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c index 80dc5085a412..bc454f899124 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c @@ -101,15 +101,20 @@ static int child(struct shared_info *info) */ info->invalid_amr = info->amr2 | (~0x0UL & ~info->expected_uamor); + /* + * if PKEY_DISABLE_EXECUTE succeeded we should update the expected_iamr + */ if (disable_execute) info->expected_iamr |= 1ul << pkeyshift(pkey1); else info->expected_iamr &= ~(1ul << pkeyshift(pkey1)); - info->expected_iamr &= ~(1ul << pkeyshift(pkey2) | 1ul << pkeyshift(pkey3)); + /* + * We allocated pkey2 and pkey 3 above. Clear the IAMR bits. + */ + info->expected_iamr &= ~(1ul << pkeyshift(pkey2)); + info->expected_iamr &= ~(1ul << pkeyshift(pkey3)); - info->expected_uamor |= 3ul << pkeyshift(pkey1) | - 3ul << pkeyshift(pkey2); /* * Create an IAMR value different from expected value. * Kernel will reject an IAMR and UAMOR change. -- cgit v1.2.3 From 482b9b3948675df60c015b2155011c1f93234992 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:46 +0530 Subject: powerpc/book3s64/pkeys: Remove is_pkey_enabled() There is only one caller to this function and the function is wrongly named. Avoid further confusion w.r.t name and open code this at the only call site. Also remove read_uamor(). There are no users for the same after this. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-24-aneesh.kumar@linux.ibm.com --- arch/powerpc/mm/book3s64/pkeys.c | 31 +++++++++++-------------------- 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index d45864a77066..792b36aa9619 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -251,25 +251,6 @@ static inline void write_iamr(u64 value) mtspr(SPRN_IAMR, value); } -static inline u64 read_uamor(void) -{ - return mfspr(SPRN_UAMOR); -} - -static bool is_pkey_enabled(int pkey) -{ - u64 uamor = read_uamor(); - u64 pkey_bits = 0x3ul << pkeyshift(pkey); - u64 uamor_pkey_bits = (uamor & pkey_bits); - - /* - * Both the bits in UAMOR corresponding to the key should be set or - * reset. - */ - WARN_ON(uamor_pkey_bits && (uamor_pkey_bits != pkey_bits)); - return !!(uamor_pkey_bits); -} - static inline void init_amr(int pkey, u8 init_bits) { u64 new_amr_bits = (((u64)init_bits & 0x3UL) << pkeyshift(pkey)); @@ -295,8 +276,18 @@ int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey, { u64 new_amr_bits = 0x0ul; u64 new_iamr_bits = 0x0ul; + u64 pkey_bits, uamor_pkey_bits; - if (!is_pkey_enabled(pkey)) + /* + * Check whether the key is disabled by UAMOR. 
+ */ + pkey_bits = 0x3ul << pkeyshift(pkey); + uamor_pkey_bits = (default_uamor & pkey_bits); + + /* + * Both the bits in UAMOR corresponding to the key should be set + */ + if (uamor_pkey_bits != pkey_bits) return -EINVAL; if (init_val & PKEY_DISABLE_EXECUTE) { -- cgit v1.2.3 From 9d1ebe9a98c1d7bf7cfbe1dba0052230c042ecdb Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 16 Jul 2020 22:21:42 +1000 Subject: selftests/powerpc: Run per_event_excludes test on Power8 or later The per_event_excludes test wants to run on Power8 or later. But currently it checks that AT_BASE_PLATFORM *equals* power8, which means it only runs on Power8. Fix it to check for the ISA 2.07 feature, which will be set on Power8 and later CPUs. Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200716122142.3776261-1-mpe@ellerman.id.au --- tools/testing/selftests/powerpc/pmu/per_event_excludes.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/powerpc/pmu/per_event_excludes.c b/tools/testing/selftests/powerpc/pmu/per_event_excludes.c index 2756fe2efdc5..2d37942bf72b 100644 --- a/tools/testing/selftests/powerpc/pmu/per_event_excludes.c +++ b/tools/testing/selftests/powerpc/pmu/per_event_excludes.c @@ -12,6 +12,8 @@ #include #include +#include + #include "event.h" #include "lib.h" #include "utils.h" @@ -23,12 +25,9 @@ static int per_event_excludes(void) { struct event *e, events[4]; - char *platform; int i; - platform = (char *)get_auxv_entry(AT_BASE_PLATFORM); - FAIL_IF(!platform); - SKIP_IF(strcmp(platform, "power8") != 0); + SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07)); /* * We need to create the events disabled, otherwise the running/enabled -- cgit v1.2.3 From 38b407be172d3d15afdbfe172691b7caad98120f Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 8 May 2020 23:06:33 +1000 Subject: powerpc/spufs: Rework fcheck() usage Currently the spu coredump code triggers an RCU warning: ============================= WARNING: suspicious RCU usage 5.7.0-rc3-01755-g7cd49f0b7ec7 #1 Not tainted ----------------------------- include/linux/fdtable.h:95 suspicious rcu_dereference_check() usage! other info that might help us debug this: rcu_scheduler_active = 2, debug_locks = 1 1 lock held by spu-coredump/1343: #0: c0000007fa22f430 (sb_writers#2){.+.+}-{0:0}, at: .do_coredump+0x1010/0x13c8 stack backtrace: CPU: 0 PID: 1343 Comm: spu-coredump Not tainted 5.7.0-rc3-01755-g7cd49f0b7ec7 #1 Call Trace: .dump_stack+0xec/0x15c (unreliable) .lockdep_rcu_suspicious+0x120/0x144 .coredump_next_context+0x148/0x158 .spufs_coredump_extra_notes_size+0x54/0x190 .elf_coredump_extra_notes_size+0x34/0x50 .elf_core_dump+0xe48/0x19d0 .do_coredump+0xe50/0x13c8 .get_signal+0x864/0xd88 .do_notify_resume+0x158/0x3c8 .interrupt_exit_user_prepare+0x19c/0x208 interrupt_return+0x14/0x1c0 This comes from fcheck_files() via fcheck(). It's pretty clearly documented that fcheck() must be wrapped with rcu_read_lock(), adding that fixes the RCU warning. hch points out that once we've released the RCU read lock the file may be closed and freed, which would leave us with a pointer to a freed spu_context. To avoid that, take a reference to the spu_context while we hold the RCU read lock, and drop that reference later once we're done with the context. 
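The rule being applied here is the usual RCU discipline: a pointer obtained from an RCU-protected lookup such as fcheck() is only guaranteed valid inside the rcu_read_lock()/rcu_read_unlock() section, so an object that has to outlive that section needs its own reference taken before the lock is dropped. A minimal sketch of the pattern (the obj_from_file()/get_obj()/put_obj() names are placeholders, not the spufs helpers):

        rcu_read_lock();
        file = fcheck(fd);              /* only valid while under RCU */
        obj  = obj_from_file(file);     /* placeholder accessor */
        get_obj(obj);                   /* pin the object before unlocking */
        rcu_read_unlock();

        use(obj);                       /* safe: we hold our own reference */
        put_obj(obj);                   /* drop it when finished */

The fix below follows exactly this shape, taking get_spu_context()/put_spu_context() references around the coredump loop.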
Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200508130633.2532759-1-mpe@ellerman.id.au --- arch/powerpc/platforms/cell/spufs/coredump.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c index 3b75e8f60609..40dd7b483f06 100644 --- a/arch/powerpc/platforms/cell/spufs/coredump.c +++ b/arch/powerpc/platforms/cell/spufs/coredump.c @@ -66,13 +66,20 @@ static int match_context(const void *v, struct file *file, unsigned fd) */ static struct spu_context *coredump_next_context(int *fd) { + struct spu_context *ctx; struct file *file; int n = iterate_fd(current->files, *fd, match_context, NULL); if (!n) return NULL; *fd = n - 1; + + rcu_read_lock(); file = fcheck(*fd); - return SPUFS_I(file_inode(file))->i_ctx; + ctx = SPUFS_I(file_inode(file))->i_ctx; + get_spu_context(ctx); + rcu_read_unlock(); + + return ctx; } int spufs_coredump_extra_notes_size(void) @@ -83,17 +90,23 @@ int spufs_coredump_extra_notes_size(void) fd = 0; while ((ctx = coredump_next_context(&fd)) != NULL) { rc = spu_acquire_saved(ctx); - if (rc) + if (rc) { + put_spu_context(ctx); break; + } + rc = spufs_ctx_note_size(ctx, fd); spu_release_saved(ctx); - if (rc < 0) + if (rc < 0) { + put_spu_context(ctx); break; + } size += rc; /* start searching the next fd next time */ fd++; + put_spu_context(ctx); } return size; -- cgit v1.2.3 From c8ed9fc9d29e24dafd08971e6a0c6b302a8ade2d Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Sat, 18 Jul 2020 12:39:58 +0200 Subject: powerpc: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200718103958.5455-1-grandmaster@al2klimov.de --- Documentation/powerpc/mpc52xx.rst | 2 +- arch/powerpc/crypto/crc32-vpmsum_core.S | 2 +- arch/powerpc/include/asm/hydra.h | 2 +- drivers/crypto/vmx/aesp8-ppc.pl | 2 +- drivers/crypto/vmx/ghashp8-ppc.pl | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Documentation/powerpc/mpc52xx.rst b/Documentation/powerpc/mpc52xx.rst index 8676ac63e077..30260707c3fe 100644 --- a/Documentation/powerpc/mpc52xx.rst +++ b/Documentation/powerpc/mpc52xx.rst @@ -2,7 +2,7 @@ Linux 2.6.x on MPC52xx family ============================= -For the latest info, go to http://www.246tNt.com/mpc52xx/ +For the latest info, go to https://www.246tNt.com/mpc52xx/ To compile/use : diff --git a/arch/powerpc/crypto/crc32-vpmsum_core.S b/arch/powerpc/crypto/crc32-vpmsum_core.S index c3524eba4d0d..a16a717c809c 100644 --- a/arch/powerpc/crypto/crc32-vpmsum_core.S +++ b/arch/powerpc/crypto/crc32-vpmsum_core.S @@ -19,7 +19,7 @@ * We then use fixed point Barrett reduction to compute a mod n over GF(2) * for n = CRC using POWER8 instructions. We use x = 32. 
* - * http://en.wikipedia.org/wiki/Barrett_reduction + * https://en.wikipedia.org/wiki/Barrett_reduction * * Copyright (C) 2015 Anton Blanchard , IBM */ diff --git a/arch/powerpc/include/asm/hydra.h b/arch/powerpc/include/asm/hydra.h index b3b0f2d020f0..ae02eb53d6ef 100644 --- a/arch/powerpc/include/asm/hydra.h +++ b/arch/powerpc/include/asm/hydra.h @@ -10,7 +10,7 @@ * * Ā© Copyright 1995 Apple Computer, Inc. All rights reserved. * - * It's available online from http://www.cpu.lu/~mlan/ftp/MacTech.pdf + * It's available online from https://www.cpu.lu/~mlan/ftp/MacTech.pdf * You can obtain paper copies of this book from computer bookstores or by * writing Morgan Kaufmann Publishers, Inc., 340 Pine Street, Sixth Floor, San * Francisco, CA 94104. Reference ISBN 1-55860-393-X. diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl index db874367b602..50a0a18f35da 100644 --- a/drivers/crypto/vmx/aesp8-ppc.pl +++ b/drivers/crypto/vmx/aesp8-ppc.pl @@ -50,7 +50,7 @@ # Written by Andy Polyakov for the OpenSSL # project. The module is, however, dual licensed under OpenSSL and # CRYPTOGAMS licenses depending on where you obtain it. For further -# details see http://www.openssl.org/~appro/cryptogams/. +# details see https://www.openssl.org/~appro/cryptogams/. # ==================================================================== # # This module implements support for AES instructions as per PowerISA diff --git a/drivers/crypto/vmx/ghashp8-ppc.pl b/drivers/crypto/vmx/ghashp8-ppc.pl index 38b06503ede0..09bba1852eec 100644 --- a/drivers/crypto/vmx/ghashp8-ppc.pl +++ b/drivers/crypto/vmx/ghashp8-ppc.pl @@ -13,7 +13,7 @@ # Written by Andy Polyakov for the OpenSSL # project. The module is, however, dual licensed under OpenSSL and # CRYPTOGAMS licenses depending on where you obtain it. For further -# details see http://www.openssl.org/~appro/cryptogams/. +# details see https://www.openssl.org/~appro/cryptogams/. # ==================================================================== # # GHASH for for PowerISA v2.07. -- cgit v1.2.3 From a7beab413e2e67dd1abe6bdd0001576892a89e81 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Fri, 17 Jul 2020 20:35:22 +0200 Subject: macintosh/adb: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200717183522.77605-1-grandmaster@al2klimov.de --- drivers/macintosh/adb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c index e49d1f287a17..73b396189039 100644 --- a/drivers/macintosh/adb.c +++ b/drivers/macintosh/adb.c @@ -163,7 +163,7 @@ static int adb_scan_bus(void) * See if anybody actually moved. This is suggested * by HW TechNote 01: * - * http://developer.apple.com/technotes/hw/hw_01.html + * https://developer.apple.com/technotes/hw/hw_01.html */ adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1, (highFree << 4) | 0xf); -- cgit v1.2.3 From 1666e5ea2f838f4266e50e4f3d973c0824256429 Mon Sep 17 00:00:00 2001 From: "Alexander A. 
Klimov" Date: Fri, 17 Jul 2020 20:29:40 +0200 Subject: macintosh/therm_adt746x: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200717182940.75484-1-grandmaster@al2klimov.de --- drivers/macintosh/therm_adt746x.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c index 8f7725dc2166..7e218437730c 100644 --- a/drivers/macintosh/therm_adt746x.c +++ b/drivers/macintosh/therm_adt746x.c @@ -5,8 +5,8 @@ * Copyright (C) 2003, 2004 Colin Leroy, Rasmus Rohde, Benjamin Herrenschmidt * * Documentation from 115254175ADT7467_pra.pdf and 3686221171167ADT7460_b.pdf - * http://www.onsemi.com/PowerSolutions/product.do?id=ADT7467 - * http://www.onsemi.com/PowerSolutions/product.do?id=ADT7460 + * https://www.onsemi.com/PowerSolutions/product.do?id=ADT7467 + * https://www.onsemi.com/PowerSolutions/product.do?id=ADT7460 * */ -- cgit v1.2.3 From a3f3f8aa1f72dafe1450ccf8cbdfb1d12d42853a Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 17 Jul 2020 19:27:14 +0800 Subject: powerpc: Remove unneeded inline functions Both of those functions are only called from 64-bit only code, so the stubs should not be needed at all. Suggested-by: Michael Ellerman Signed-off-by: YueHaibing Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200717112714.19304-1-yuehaibing@huawei.com --- arch/powerpc/include/asm/mmu_context.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 1a474f6b1992..7f3658a97384 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -218,8 +218,6 @@ static inline void inc_mm_active_cpus(struct mm_struct *mm) { } static inline void dec_mm_active_cpus(struct mm_struct *mm) { } static inline void mm_context_add_copro(struct mm_struct *mm) { } static inline void mm_context_remove_copro(struct mm_struct *mm) { } -static inline void mm_context_add_vas_windows(struct mm_struct *mm) { } -static inline void mm_context_remove_vas_windows(struct mm_struct *mm) { } #endif -- cgit v1.2.3 From f4ac1774f2cba44994ce9ac0a65772e4656ac2df Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 3 Jul 2020 11:19:56 +1000 Subject: powerpc: Remove stale calc_vm_prot_bits() comment This comment is wrong, we wouldn't use calc_vm_prot_bits() here because we are being called by calc_vm_prot_bits() to modify its behaviour. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200703011958.1166620-2-npiggin@gmail.com --- arch/powerpc/include/asm/mman.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h index d610c2e07b28..4ba303ea27f5 100644 --- a/arch/powerpc/include/asm/mman.h +++ b/arch/powerpc/include/asm/mman.h @@ -13,10 +13,6 @@ #include #include -/* - * This file is included by linux/mman.h, so we can't use cacl_vm_prot_bits() - * here. How important is the optimization? 
- */ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, unsigned long pkey) { -- cgit v1.2.3 From 5c9fa16e8abd342ce04dc830c1ebb2a03abf6c05 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 3 Jul 2020 11:19:57 +1000 Subject: powerpc/64s: Remove PROT_SAO support ISA v3.1 does not support the SAO storage control attribute required to implement PROT_SAO. PROT_SAO was used by specialised system software (Lx86) that has been discontinued for about 7 years, and is not thought to be used elsewhere, so removal should not cause problems. We rather remove it than keep support for older processors, because live migrating guest partitions to newer processors may not be possible if SAO is in use (or worse allowed with silent races). - PROT_SAO stays in the uapi header so code using it would still build. - arch_validate_prot() is removed, the generic version rejects PROT_SAO so applications would get a failure at mmap() time. Signed-off-by: Nicholas Piggin [mpe: Drop KVM change for the time being] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200703011958.1166620-3-npiggin@gmail.com --- arch/powerpc/include/asm/book3s/64/pgtable.h | 8 +++-- arch/powerpc/include/asm/cputable.h | 10 +++---- arch/powerpc/include/asm/mman.h | 26 +++-------------- arch/powerpc/include/asm/nohash/64/pgtable.h | 2 -- arch/powerpc/include/uapi/asm/mman.h | 2 +- arch/powerpc/kernel/dt_cpu_ftrs.c | 2 +- arch/powerpc/mm/book3s64/hash_utils.c | 2 -- include/linux/mm.h | 2 -- include/trace/events/mmflags.h | 2 -- mm/ksm.c | 4 --- tools/testing/selftests/powerpc/mm/.gitignore | 1 - tools/testing/selftests/powerpc/mm/Makefile | 4 +-- tools/testing/selftests/powerpc/mm/prot_sao.c | 42 --------------------------- 13 files changed, 17 insertions(+), 90 deletions(-) delete mode 100644 tools/testing/selftests/powerpc/mm/prot_sao.c diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 495fc0ccb453..6de56c3b33c4 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -20,9 +20,13 @@ #define _PAGE_RW (_PAGE_READ | _PAGE_WRITE) #define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC) #define _PAGE_PRIVILEGED 0x00008 /* kernel access only */ -#define _PAGE_SAO 0x00010 /* Strong access order */ + +#define _PAGE_CACHE_CTL 0x00030 /* Bits for the folowing cache modes */ + /* No bits set is normal cacheable memory */ + /* 0x00010 unused, is SAO bit on radix POWER9 */ #define _PAGE_NON_IDEMPOTENT 0x00020 /* non idempotent memory */ #define _PAGE_TOLERANT 0x00030 /* tolerant memory, cache inhibited */ + #define _PAGE_DIRTY 0x00080 /* C: page changed */ #define _PAGE_ACCESSED 0x00100 /* R: page referenced */ /* @@ -824,8 +828,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, return hash__set_pte_at(mm, addr, ptep, pte, percpu); } -#define _PAGE_CACHE_CTL (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT) - #define pgprot_noncached pgprot_noncached static inline pgprot_t pgprot_noncached(pgprot_t prot) { diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index dd0a2e77a695..a461c3300804 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -191,7 +191,7 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTR_SPURR LONG_ASM_CONST(0x0000000001000000) #define CPU_FTR_DSCR LONG_ASM_CONST(0x0000000002000000) #define CPU_FTR_VSX LONG_ASM_CONST(0x0000000004000000) -#define 
CPU_FTR_SAO LONG_ASM_CONST(0x0000000008000000) +// Free LONG_ASM_CONST(0x0000000008000000) #define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000000010000000) #define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0000000020000000) #define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0000000040000000) @@ -435,7 +435,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ + CPU_FTR_DSCR | CPU_FTR_ASYM_SMT | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | \ CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX ) @@ -444,7 +444,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | CPU_FTR_SAO | \ + CPU_FTR_DSCR | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \ @@ -455,7 +455,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | CPU_FTR_SAO | \ + CPU_FTR_DSCR | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ @@ -473,7 +473,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | CPU_FTR_SAO | \ + CPU_FTR_DSCR | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h index 4ba303ea27f5..7c07728af300 100644 --- a/arch/powerpc/include/asm/mman.h +++ b/arch/powerpc/include/asm/mman.h @@ -13,38 +13,20 @@ #include #include +#ifdef CONFIG_PPC_MEM_KEYS static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, unsigned long pkey) { -#ifdef CONFIG_PPC_MEM_KEYS - return (((prot & PROT_SAO) ? VM_SAO : 0) | pkey_to_vmflag_bits(pkey)); -#else - return ((prot & PROT_SAO) ? VM_SAO : 0); -#endif + return pkey_to_vmflag_bits(pkey); } #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) { -#ifdef CONFIG_PPC_MEM_KEYS - return (vm_flags & VM_SAO) ? - __pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) : - __pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags)); -#else - return (vm_flags & VM_SAO) ? 
__pgprot(_PAGE_SAO) : __pgprot(0); -#endif + return __pgprot(vmflag_to_pte_pkey_bits(vm_flags)); } #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags) - -static inline bool arch_validate_prot(unsigned long prot, unsigned long addr) -{ - if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO)) - return false; - if ((prot & PROT_SAO) && !cpu_has_feature(CPU_FTR_SAO)) - return false; - return true; -} -#define arch_validate_prot arch_validate_prot +#endif #endif /* CONFIG_PPC64 */ #endif /* _ASM_POWERPC_MMAN_H */ diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 6cb8aa357191..59ee9fa4ae09 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -82,8 +82,6 @@ */ #include -#define _PAGE_SAO 0 - #define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) /* diff --git a/arch/powerpc/include/uapi/asm/mman.h b/arch/powerpc/include/uapi/asm/mman.h index c0c737215b00..3a700351feca 100644 --- a/arch/powerpc/include/uapi/asm/mman.h +++ b/arch/powerpc/include/uapi/asm/mman.h @@ -11,7 +11,7 @@ #include -#define PROT_SAO 0x10 /* Strong Access Ordering */ +#define PROT_SAO 0x10 /* Unsupported since v5.9 */ #define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ #define MAP_NORESERVE 0x40 /* don't reserve swap pages */ diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 554bec785f6a..9aa8537f7da2 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -622,7 +622,7 @@ static struct dt_cpu_feature_match __initdata {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL}, {"processor-utilization-of-resources-register", feat_enable_purr, 0}, {"no-execute", feat_enable, 0}, - {"strong-access-ordering", feat_enable, CPU_FTR_SAO}, + /* strong-access-ordering is unused */ {"cache-inhibited-large-page", feat_enable_large_ci, 0}, {"coprocessor-icswx", feat_enable, 0}, {"hypervisor-virtualization-interrupt", feat_enable_hvi, 0}, diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 9dfb0ceed5e3..6f9f346a5f65 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -232,8 +232,6 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags) rflags |= HPTE_R_I; else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT) rflags |= (HPTE_R_I | HPTE_R_G); - else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO) - rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M); else /* * Add memory coherence if cache inhibited is not set diff --git a/include/linux/mm.h b/include/linux/mm.h index dc7b87310c10..6c8333d6c991 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -317,8 +317,6 @@ extern unsigned int kobjsize(const void *objp); #if defined(CONFIG_X86) # define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */ -#elif defined(CONFIG_PPC) -# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ #elif defined(CONFIG_PARISC) # define VM_GROWSUP VM_ARCH_1 #elif defined(CONFIG_IA64) diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 5fb752034386..939092dbcb8b 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -114,8 +114,6 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" ) #if defined(CONFIG_X86) #define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" } -#elif defined(CONFIG_PPC) -#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" } #elif defined(CONFIG_PARISC) || 
defined(CONFIG_IA64) #define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" } #elif !defined(CONFIG_MMU) diff --git a/mm/ksm.c b/mm/ksm.c index 4102034cd55a..d1cfa18689b5 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2452,10 +2452,6 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start, if (vma_is_dax(vma)) return 0; -#ifdef VM_SAO - if (*vm_flags & VM_SAO) - return 0; -#endif #ifdef VM_SPARC_ADI if (*vm_flags & VM_SPARC_ADI) return 0; diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore index 8f841f925baa..8d041f508a51 100644 --- a/tools/testing/selftests/powerpc/mm/.gitignore +++ b/tools/testing/selftests/powerpc/mm/.gitignore @@ -2,7 +2,6 @@ hugetlb_vs_thp_test subpage_prot tempfile -prot_sao segv_errors wild_bctr large_vm_fork_separation diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile index f9fa0ba7435c..5a86d59441dc 100644 --- a/tools/testing/selftests/powerpc/mm/Makefile +++ b/tools/testing/selftests/powerpc/mm/Makefile @@ -2,7 +2,7 @@ noarg: $(MAKE) -C ../ -TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \ +TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot segv_errors wild_bctr \ large_vm_fork_separation bad_accesses pkey_exec_prot TEST_GEN_PROGS_EXTENDED := tlbie_test TEST_GEN_FILES := tempfile @@ -12,8 +12,6 @@ include ../../lib.mk $(TEST_GEN_PROGS): ../harness.c ../utils.c -$(OUTPUT)/prot_sao: ../utils.c - $(OUTPUT)/wild_bctr: CFLAGS += -m64 $(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64 $(OUTPUT)/bad_accesses: CFLAGS += -m64 diff --git a/tools/testing/selftests/powerpc/mm/prot_sao.c b/tools/testing/selftests/powerpc/mm/prot_sao.c deleted file mode 100644 index e2eed65b7735..000000000000 --- a/tools/testing/selftests/powerpc/mm/prot_sao.c +++ /dev/null @@ -1,42 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright 2016, Michael Ellerman, IBM Corp. - */ - -#include -#include -#include -#include - -#include - -#include "utils.h" - -#define SIZE (64 * 1024) - -int test_prot_sao(void) -{ - char *p; - - /* 2.06 or later should support SAO */ - SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06)); - - /* - * Ensure we can ask for PROT_SAO. - * We can't really verify that it does the right thing, but at least we - * confirm the kernel will accept it. - */ - p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE | PROT_SAO, - MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); - FAIL_IF(p == MAP_FAILED); - - /* Write to the mapping, to at least cause a fault */ - memset(p, 0xaa, SIZE); - - return 0; -} - -int main(void) -{ - return test_harness(test_prot_sao, "prot-sao"); -} -- cgit v1.2.3 From 63396ada804c676e070bd1b8663046f18698ab27 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 3 Jul 2020 11:19:58 +1000 Subject: powerpc/64s/hash: Disable subpage_prot syscall by default The subpage_prot syscall was added for specialised system software (Lx86) that has been discontinued for about 7 years, and is not thought to be used elsewhere, so disable it by default. 
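For context, the interface being made optional here is the powerpc-only subpage_prot() system call. A rough sketch of how a userspace caller would reach it (a raw syscall(2) wrapper shown for illustration only; __NR_subpage_prot comes from the powerpc asm/unistd.h):

        #include <unistd.h>
        #include <sys/syscall.h>

        /*
         * Set protections (full access, read-only or no access) on the 4k
         * subpages of the 64k pages covering [addr, addr + len); 'map'
         * packs a 2-bit permission field per 4k subpage.
         */
        static long subpage_prot(unsigned long addr, unsigned long len,
                                 unsigned int *map)
        {
                return syscall(__NR_subpage_prot, addr, len, map);
        }

Anything still depending on this interface now has to enable PPC_SUBPAGE_PROT explicitly in its kernel config.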
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200703011958.1166620-4-npiggin@gmail.com --- arch/powerpc/Kconfig | 7 +++++-- arch/powerpc/configs/powernv_defconfig | 1 - arch/powerpc/configs/pseries_defconfig | 1 - 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 51abc59c3334..70adf69f8b1a 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -833,13 +833,16 @@ config FORCE_MAX_ZONEORDER this in mind when choosing a value for this option. config PPC_SUBPAGE_PROT - bool "Support setting protections for 4k subpages" + bool "Support setting protections for 4k subpages (subpage_prot syscall)" + default n depends on PPC_BOOK3S_64 && PPC_64K_PAGES help - This option adds support for a system call to allow user programs + This option adds support for system call to allow user programs to set access permissions (read/write, readonly, or no access) on the 4k subpages of each 64k page. + If unsure, say N here. + config PPC_COPRO_BASE bool diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig index 2de9aadf0f50..afc0dd73a1e6 100644 --- a/arch/powerpc/configs/powernv_defconfig +++ b/arch/powerpc/configs/powernv_defconfig @@ -64,7 +64,6 @@ CONFIG_HWPOISON_INJECT=m CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_DEFERRED_STRUCT_PAGE_INIT=y CONFIG_PPC_64K_PAGES=y -CONFIG_PPC_SUBPAGE_PROT=y CONFIG_SCHED_SMT=y CONFIG_PM=y CONFIG_HOTPLUG_PCI=y diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index dfa4a726333b..894e8d85fb48 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -57,7 +57,6 @@ CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_PPC_64K_PAGES=y -CONFIG_PPC_SUBPAGE_PROT=y CONFIG_SCHED_SMT=y CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_RPA=m -- cgit v1.2.3 From 3c9450c053f88e525b2db1e6990cdf34d14e7696 Mon Sep 17 00:00:00 2001 From: Madhavan Srinivasan Date: Sun, 14 Jun 2020 14:06:04 +0530 Subject: powerpc/perf: Fix missing is_sier_aviable() during build Compilation error: arch/powerpc/perf/perf_regs.c:80:undefined reference to `.is_sier_available' Currently is_sier_available() is part of core-book3s.c, which is added to build based on CONFIG_PPC_PERF_CTRS. A config with CONFIG_PERF_EVENTS and without CONFIG_PPC_PERF_CTRS will have a build break because of missing is_sier_available(). In practice it only breaks when CONFIG_FSL_EMB_PERF_EVENT=n because that also guards the usage of is_sier_available(). That only happens with CONFIG_PPC_BOOK3E_64=y and CONFIG_FSL_SOC_BOOKE=n. Patch adds is_sier_available() in asm/perf_event.h to fix the build break for configs missing CONFIG_PPC_PERF_CTRS. 
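Structurally this is the common header idiom of pairing the configured-in include with a static inline fallback, so callers compile whether or not CONFIG_PPC_PERF_CTRS is set. Condensed from the hunk below:

        #ifdef CONFIG_PPC_PERF_CTRS
        #include <asm/perf_event_server.h>      /* real is_sier_available() */
        #else
        static inline bool is_sier_available(void) { return false; }
        #endif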
Fixes: 333804dc3b7a ("powerpc/perf: Update perf_regs structure to include SIER") Reported-by: Aneesh Kumar K.V Signed-off-by: Madhavan Srinivasan [mpe: Add detail about CONFIG_FSL_SOC_BOOKE] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200614083604.302611-1-maddy@linux.ibm.com --- arch/powerpc/include/asm/perf_event.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h index eed3954082fa..1e8b2e1ec1db 100644 --- a/arch/powerpc/include/asm/perf_event.h +++ b/arch/powerpc/include/asm/perf_event.h @@ -12,6 +12,8 @@ #ifdef CONFIG_PPC_PERF_CTRS #include +#else +static inline bool is_sier_available(void) { return false; } #endif #ifdef CONFIG_FSL_EMB_PERF_EVENT -- cgit v1.2.3 From 78d76819e6f04672989506e7792895a51438516e Mon Sep 17 00:00:00 2001 From: Athira Rajeev Date: Fri, 17 Jul 2020 10:38:13 -0400 Subject: powerpc/perf: Update cpu_hw_event to use `struct` for storing MMCR registers core-book3s currently uses array to store the MMCR registers as part of per-cpu `cpu_hw_events`. This patch does a clean up to use `struct` to store mmcr regs instead of array. This will make code easier to read and reduces chance of any subtle bug that may come in the future, say when new registers are added. Patch updates all relevant code that was using MMCR array ( cpuhw->mmcr[x]) to use newly introduced `struct`. This includes the PMU driver code for supported platforms (power5 to power9) and ISA macros for counter support functions. Signed-off-by: Athira Rajeev Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1594996707-3727-2-git-send-email-atrajeev@linux.vnet.ibm.com --- arch/powerpc/include/asm/perf_event_server.h | 10 ++++-- arch/powerpc/perf/core-book3s.c | 53 +++++++++++++--------------- arch/powerpc/perf/isa207-common.c | 20 +++++------ arch/powerpc/perf/isa207-common.h | 4 +-- arch/powerpc/perf/mpc7450-pmu.c | 21 +++++++---- arch/powerpc/perf/power5+-pmu.c | 17 ++++----- arch/powerpc/perf/power5-pmu.c | 17 ++++----- arch/powerpc/perf/power6-pmu.c | 16 ++++----- arch/powerpc/perf/power7-pmu.c | 17 ++++----- arch/powerpc/perf/ppc970-pmu.c | 24 ++++++------- 10 files changed, 105 insertions(+), 94 deletions(-) diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 3e9703f44c7c..f9a36680339d 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -17,6 +17,12 @@ struct perf_event; +struct mmcr_regs { + unsigned long mmcr0; + unsigned long mmcr1; + unsigned long mmcr2; + unsigned long mmcra; +}; /* * This struct provides the constants and functions needed to * describe the PMU on a particular POWER-family CPU. 
@@ -28,7 +34,7 @@ struct power_pmu { unsigned long add_fields; unsigned long test_adder; int (*compute_mmcr)(u64 events[], int n_ev, - unsigned int hwc[], unsigned long mmcr[], + unsigned int hwc[], struct mmcr_regs *mmcr, struct perf_event *pevents[]); int (*get_constraint)(u64 event_id, unsigned long *mskp, unsigned long *valp); @@ -41,7 +47,7 @@ struct power_pmu { unsigned long group_constraint_val; u64 (*bhrb_filter_map)(u64 branch_sample_type); void (*config_bhrb)(u64 pmu_bhrb_filter); - void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); + void (*disable_pmc)(unsigned int pmc, struct mmcr_regs *mmcr); int (*limited_pmc_event)(u64 event_id); u32 flags; const struct attribute_group **attr_groups; diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index cd6a742ac6ef..18b1b6aae692 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -37,12 +37,7 @@ struct cpu_hw_events { struct perf_event *event[MAX_HWEVENTS]; u64 events[MAX_HWEVENTS]; unsigned int flags[MAX_HWEVENTS]; - /* - * The order of the MMCR array is: - * - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2 - * - 32-bit, MMCR0, MMCR1, MMCR2 - */ - unsigned long mmcr[4]; + struct mmcr_regs mmcr; struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS]; u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS]; u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; @@ -121,7 +116,7 @@ static void ebb_event_add(struct perf_event *event) { } static void ebb_switch_out(unsigned long mmcr0) { } static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw) { - return cpuhw->mmcr[0]; + return cpuhw->mmcr.mmcr0; } static inline void power_pmu_bhrb_enable(struct perf_event *event) {} @@ -590,7 +585,7 @@ static void ebb_switch_out(unsigned long mmcr0) static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw) { - unsigned long mmcr0 = cpuhw->mmcr[0]; + unsigned long mmcr0 = cpuhw->mmcr.mmcr0; if (!ebb) goto out; @@ -624,7 +619,7 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw) * unfreeze counters, it should not set exclude_xxx in its events and * instead manage the MMCR2 entirely by itself. */ - mtspr(SPRN_MMCR2, cpuhw->mmcr[3] | current->thread.mmcr2); + mtspr(SPRN_MMCR2, cpuhw->mmcr.mmcr2 | current->thread.mmcr2); out: return mmcr0; } @@ -1232,9 +1227,9 @@ static void power_pmu_disable(struct pmu *pmu) /* * Disable instruction sampling if it was enabled */ - if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { + if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE) { mtspr(SPRN_MMCRA, - cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); + cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE); mb(); isync(); } @@ -1308,18 +1303,18 @@ static void power_pmu_enable(struct pmu *pmu) * (possibly updated for removal of events). */ if (!cpuhw->n_added) { - mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); - mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); + mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE); + mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1); goto out_enable; } /* * Clear all MMCR settings and recompute them for the new set of events. 
*/ - memset(cpuhw->mmcr, 0, sizeof(cpuhw->mmcr)); + memset(&cpuhw->mmcr, 0, sizeof(cpuhw->mmcr)); if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index, - cpuhw->mmcr, cpuhw->event)) { + &cpuhw->mmcr, cpuhw->event)) { /* shouldn't ever get here */ printk(KERN_ERR "oops compute_mmcr failed\n"); goto out; @@ -1333,11 +1328,11 @@ static void power_pmu_enable(struct pmu *pmu) */ event = cpuhw->event[0]; if (event->attr.exclude_user) - cpuhw->mmcr[0] |= MMCR0_FCP; + cpuhw->mmcr.mmcr0 |= MMCR0_FCP; if (event->attr.exclude_kernel) - cpuhw->mmcr[0] |= freeze_events_kernel; + cpuhw->mmcr.mmcr0 |= freeze_events_kernel; if (event->attr.exclude_hv) - cpuhw->mmcr[0] |= MMCR0_FCHV; + cpuhw->mmcr.mmcr0 |= MMCR0_FCHV; } /* @@ -1346,12 +1341,12 @@ static void power_pmu_enable(struct pmu *pmu) * Then unfreeze the events. */ ppc_set_pmu_inuse(1); - mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); - mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); - mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)) + mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE); + mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1); + mtspr(SPRN_MMCR0, (cpuhw->mmcr.mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)) | MMCR0_FC); if (ppmu->flags & PPMU_ARCH_207S) - mtspr(SPRN_MMCR2, cpuhw->mmcr[3]); + mtspr(SPRN_MMCR2, cpuhw->mmcr.mmcr2); /* * Read off any pre-existing events that need to move @@ -1402,7 +1397,7 @@ static void power_pmu_enable(struct pmu *pmu) perf_event_update_userpage(event); } cpuhw->n_limited = n_lim; - cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; + cpuhw->mmcr.mmcr0 |= MMCR0_PMXE | MMCR0_FCECE; out_enable: pmao_restore_workaround(ebb); @@ -1418,9 +1413,9 @@ static void power_pmu_enable(struct pmu *pmu) /* * Enable instruction sampling if necessary */ - if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { + if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE) { mb(); - mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); + mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra); } out: @@ -1550,7 +1545,7 @@ static void power_pmu_del(struct perf_event *event, int ef_flags) cpuhw->flags[i-1] = cpuhw->flags[i]; } --cpuhw->n_events; - ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr); + ppmu->disable_pmc(event->hw.idx - 1, &cpuhw->mmcr); if (event->hw.idx) { write_pmc(event->hw.idx, 0); event->hw.idx = 0; @@ -1571,7 +1566,7 @@ static void power_pmu_del(struct perf_event *event, int ef_flags) } if (cpuhw->n_events == 0) { /* disable exceptions if no events are running */ - cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); + cpuhw->mmcr.mmcr0 &= ~(MMCR0_PMXE | MMCR0_FCECE); } if (has_branch_stack(event)) @@ -2240,7 +2235,7 @@ static void __perf_event_interrupt(struct pt_regs *regs) * XXX might want to use MSR.PM to keep the events frozen until * we get back out of this interrupt. 
*/ - write_mmcr0(cpuhw, cpuhw->mmcr[0]); + write_mmcr0(cpuhw, cpuhw->mmcr.mmcr0); if (nmi) nmi_exit(); @@ -2262,7 +2257,7 @@ static int power_pmu_prepare_cpu(unsigned int cpu) if (ppmu) { memset(cpuhw, 0, sizeof(*cpuhw)); - cpuhw->mmcr[0] = MMCR0_FC; + cpuhw->mmcr.mmcr0 = MMCR0_FC; } return 0; } diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 4c86da5eb28a..2fe63f2de4b0 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -363,7 +363,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) } int isa207_compute_mmcr(u64 event[], int n_ev, - unsigned int hwc[], unsigned long mmcr[], + unsigned int hwc[], struct mmcr_regs *mmcr, struct perf_event *pevents[]) { unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val; @@ -464,30 +464,30 @@ int isa207_compute_mmcr(u64 event[], int n_ev, } /* Return MMCRx values */ - mmcr[0] = 0; + mmcr->mmcr0 = 0; /* pmc_inuse is 1-based */ if (pmc_inuse & 2) - mmcr[0] = MMCR0_PMC1CE; + mmcr->mmcr0 = MMCR0_PMC1CE; if (pmc_inuse & 0x7c) - mmcr[0] |= MMCR0_PMCjCE; + mmcr->mmcr0 |= MMCR0_PMCjCE; /* If we're not using PMC 5 or 6, freeze them */ if (!(pmc_inuse & 0x60)) - mmcr[0] |= MMCR0_FC56; + mmcr->mmcr0 |= MMCR0_FC56; - mmcr[1] = mmcr1; - mmcr[2] = mmcra; - mmcr[3] = mmcr2; + mmcr->mmcr1 = mmcr1; + mmcr->mmcra = mmcra; + mmcr->mmcr2 = mmcr2; return 0; } -void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[]) +void isa207_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr) { if (pmc <= 3) - mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1)); + mmcr->mmcr1 &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1)); } static int find_alternative(u64 event, const unsigned int ev_alt[][MAX_ALT], int size) diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h index 63fd4f3f6013..df968fde65a1 100644 --- a/arch/powerpc/perf/isa207-common.h +++ b/arch/powerpc/perf/isa207-common.h @@ -217,9 +217,9 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp); int isa207_compute_mmcr(u64 event[], int n_ev, - unsigned int hwc[], unsigned long mmcr[], + unsigned int hwc[], struct mmcr_regs *mmcr, struct perf_event *pevents[]); -void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[]); +void isa207_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr); int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags, const unsigned int ev_alt[][MAX_ALT]); void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags, diff --git a/arch/powerpc/perf/mpc7450-pmu.c b/arch/powerpc/perf/mpc7450-pmu.c index 4d5ef92511d1..826de253aa4d 100644 --- a/arch/powerpc/perf/mpc7450-pmu.c +++ b/arch/powerpc/perf/mpc7450-pmu.c @@ -257,7 +257,7 @@ static const u32 pmcsel_mask[N_COUNTER] = { * Compute MMCR0/1/2 values for a set of events. */ static int mpc7450_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], - unsigned long mmcr[], + struct mmcr_regs *mmcr, struct perf_event *pevents[]) { u8 event_index[N_CLASSES][N_COUNTER]; @@ -321,9 +321,16 @@ static int mpc7450_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], mmcr0 |= MMCR0_PMCnCE; /* Return MMCRx values */ - mmcr[0] = mmcr0; - mmcr[1] = mmcr1; - mmcr[2] = mmcr2; + mmcr->mmcr0 = mmcr0; + mmcr->mmcr1 = mmcr1; + mmcr->mmcr2 = mmcr2; + /* + * 32-bit doesn't have an MMCRA and uses SPRN_MMCR2 to define + * SPRN_MMCRA. So assign mmcra of cpu_hw_events with `mmcr2` + * value to ensure that any write to this SPRN_MMCRA will + * use mmcr2 value. 
+ */ + mmcr->mmcra = mmcr2; return 0; } @@ -331,12 +338,12 @@ static int mpc7450_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], * Disable counting by a PMC. * Note that the pmc argument is 0-based here, not 1-based. */ -static void mpc7450_disable_pmc(unsigned int pmc, unsigned long mmcr[]) +static void mpc7450_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr) { if (pmc <= 1) - mmcr[0] &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]); + mmcr->mmcr0 &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]); else - mmcr[1] &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]); + mmcr->mmcr1 &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]); } static int mpc7450_generic_events[] = { diff --git a/arch/powerpc/perf/power5+-pmu.c b/arch/powerpc/perf/power5+-pmu.c index f8574547fc6b..5f0821e54f04 100644 --- a/arch/powerpc/perf/power5+-pmu.c +++ b/arch/powerpc/perf/power5+-pmu.c @@ -448,7 +448,8 @@ static int power5p_marked_instr_event(u64 event) } static int power5p_compute_mmcr(u64 event[], int n_ev, - unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[]) + unsigned int hwc[], struct mmcr_regs *mmcr, + struct perf_event *pevents[]) { unsigned long mmcr1 = 0; unsigned long mmcra = 0; @@ -586,20 +587,20 @@ static int power5p_compute_mmcr(u64 event[], int n_ev, } /* Return MMCRx values */ - mmcr[0] = 0; + mmcr->mmcr0 = 0; if (pmc_inuse & 1) - mmcr[0] = MMCR0_PMC1CE; + mmcr->mmcr0 = MMCR0_PMC1CE; if (pmc_inuse & 0x3e) - mmcr[0] |= MMCR0_PMCjCE; - mmcr[1] = mmcr1; - mmcr[2] = mmcra; + mmcr->mmcr0 |= MMCR0_PMCjCE; + mmcr->mmcr1 = mmcr1; + mmcr->mmcra = mmcra; return 0; } -static void power5p_disable_pmc(unsigned int pmc, unsigned long mmcr[]) +static void power5p_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr) { if (pmc <= 3) - mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc)); + mmcr->mmcr1 &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc)); } static int power5p_generic_events[] = { diff --git a/arch/powerpc/perf/power5-pmu.c b/arch/powerpc/perf/power5-pmu.c index da52ecab7c9a..426021d4b44c 100644 --- a/arch/powerpc/perf/power5-pmu.c +++ b/arch/powerpc/perf/power5-pmu.c @@ -379,7 +379,8 @@ static int power5_marked_instr_event(u64 event) } static int power5_compute_mmcr(u64 event[], int n_ev, - unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[]) + unsigned int hwc[], struct mmcr_regs *mmcr, + struct perf_event *pevents[]) { unsigned long mmcr1 = 0; unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; @@ -528,20 +529,20 @@ static int power5_compute_mmcr(u64 event[], int n_ev, } /* Return MMCRx values */ - mmcr[0] = 0; + mmcr->mmcr0 = 0; if (pmc_inuse & 1) - mmcr[0] = MMCR0_PMC1CE; + mmcr->mmcr0 = MMCR0_PMC1CE; if (pmc_inuse & 0x3e) - mmcr[0] |= MMCR0_PMCjCE; - mmcr[1] = mmcr1; - mmcr[2] = mmcra; + mmcr->mmcr0 |= MMCR0_PMCjCE; + mmcr->mmcr1 = mmcr1; + mmcr->mmcra = mmcra; return 0; } -static void power5_disable_pmc(unsigned int pmc, unsigned long mmcr[]) +static void power5_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr) { if (pmc <= 3) - mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc)); + mmcr->mmcr1 &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc)); } static int power5_generic_events[] = { diff --git a/arch/powerpc/perf/power6-pmu.c b/arch/powerpc/perf/power6-pmu.c index 3929cacf72ed..e343a51a989e 100644 --- a/arch/powerpc/perf/power6-pmu.c +++ b/arch/powerpc/perf/power6-pmu.c @@ -171,7 +171,7 @@ static int power6_marked_instr_event(u64 event) * Assign PMC numbers and compute MMCR1 value for a set of events */ static int p6_compute_mmcr(u64 event[], int n_ev, - unsigned int hwc[], unsigned 
long mmcr[], struct perf_event *pevents[]) + unsigned int hwc[], struct mmcr_regs *mmcr, struct perf_event *pevents[]) { unsigned long mmcr1 = 0; unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; @@ -243,13 +243,13 @@ static int p6_compute_mmcr(u64 event[], int n_ev, if (pmc < 4) mmcr1 |= (unsigned long)psel << MMCR1_PMCSEL_SH(pmc); } - mmcr[0] = 0; + mmcr->mmcr0 = 0; if (pmc_inuse & 1) - mmcr[0] = MMCR0_PMC1CE; + mmcr->mmcr0 = MMCR0_PMC1CE; if (pmc_inuse & 0xe) - mmcr[0] |= MMCR0_PMCjCE; - mmcr[1] = mmcr1; - mmcr[2] = mmcra; + mmcr->mmcr0 |= MMCR0_PMCjCE; + mmcr->mmcr1 = mmcr1; + mmcr->mmcra = mmcra; return 0; } @@ -457,11 +457,11 @@ static int p6_get_alternatives(u64 event, unsigned int flags, u64 alt[]) return nalt; } -static void p6_disable_pmc(unsigned int pmc, unsigned long mmcr[]) +static void p6_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr) { /* Set PMCxSEL to 0 to disable PMCx */ if (pmc <= 3) - mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc)); + mmcr->mmcr1 &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc)); } static int power6_generic_events[] = { diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c index a137813a3076..315233691314 100644 --- a/arch/powerpc/perf/power7-pmu.c +++ b/arch/powerpc/perf/power7-pmu.c @@ -242,7 +242,8 @@ static int power7_marked_instr_event(u64 event) } static int power7_compute_mmcr(u64 event[], int n_ev, - unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[]) + unsigned int hwc[], struct mmcr_regs *mmcr, + struct perf_event *pevents[]) { unsigned long mmcr1 = 0; unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; @@ -298,20 +299,20 @@ static int power7_compute_mmcr(u64 event[], int n_ev, } /* Return MMCRx values */ - mmcr[0] = 0; + mmcr->mmcr0 = 0; if (pmc_inuse & 1) - mmcr[0] = MMCR0_PMC1CE; + mmcr->mmcr0 = MMCR0_PMC1CE; if (pmc_inuse & 0x3e) - mmcr[0] |= MMCR0_PMCjCE; - mmcr[1] = mmcr1; - mmcr[2] = mmcra; + mmcr->mmcr0 |= MMCR0_PMCjCE; + mmcr->mmcr1 = mmcr1; + mmcr->mmcra = mmcra; return 0; } -static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[]) +static void power7_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr) { if (pmc <= 3) - mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc)); + mmcr->mmcr1 &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc)); } static int power7_generic_events[] = { diff --git a/arch/powerpc/perf/ppc970-pmu.c b/arch/powerpc/perf/ppc970-pmu.c index 4035d93d87ab..89a90ab6a6c8 100644 --- a/arch/powerpc/perf/ppc970-pmu.c +++ b/arch/powerpc/perf/ppc970-pmu.c @@ -253,7 +253,8 @@ static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[]) } static int p970_compute_mmcr(u64 event[], int n_ev, - unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[]) + unsigned int hwc[], struct mmcr_regs *mmcr, + struct perf_event *pevents[]) { unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0; unsigned int pmc, unit, byte, psel; @@ -393,27 +394,26 @@ static int p970_compute_mmcr(u64 event[], int n_ev, mmcra |= 0x2000; /* mark only one IOP per PPC instruction */ /* Return MMCRx values */ - mmcr[0] = mmcr0; - mmcr[1] = mmcr1; - mmcr[2] = mmcra; + mmcr->mmcr0 = mmcr0; + mmcr->mmcr1 = mmcr1; + mmcr->mmcra = mmcra; return 0; } -static void p970_disable_pmc(unsigned int pmc, unsigned long mmcr[]) +static void p970_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr) { - int shift, i; + int shift; + /* + * Setting the PMCxSEL field to 0x08 disables PMC x. 
+ */ if (pmc <= 1) { shift = MMCR0_PMC1SEL_SH - 7 * pmc; - i = 0; + mmcr->mmcr0 = (mmcr->mmcr0 & ~(0x1fUL << shift)) | (0x08UL << shift); } else { shift = MMCR1_PMC3SEL_SH - 5 * (pmc - 2); - i = 1; + mmcr->mmcr1 = (mmcr->mmcr1 & ~(0x1fUL << shift)) | (0x08UL << shift); } - /* - * Setting the PMCxSEL field to 0x08 disables PMC x. - */ - mmcr[i] = (mmcr[i] & ~(0x1fUL << shift)) | (0x08UL << shift); } static int ppc970_generic_events[] = { -- cgit v1.2.3 From 7e4a145e5b675d5a9182f756950f001eaa256795 Mon Sep 17 00:00:00 2001 From: Athira Rajeev Date: Fri, 17 Jul 2020 10:38:14 -0400 Subject: KVM: PPC: Book3S HV: Cleanup updates for kvm vcpu MMCR Currently `kvm_vcpu_arch` stores all Monitor Mode Control registers in a flat array in order: mmcr0, mmcr1, mmcra, mmcr2, mmcrs Split this to give mmcra and mmcrs its own entries in vcpu and use a flat array for mmcr0 to mmcr2. This patch implements this cleanup to make code easier to read. Signed-off-by: Athira Rajeev [mpe: Fix MMCRA/MMCR2 uapi breakage as noted by paulus] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1594996707-3727-3-git-send-email-atrajeev@linux.vnet.ibm.com --- arch/powerpc/include/asm/kvm_host.h | 4 +++- arch/powerpc/kernel/asm-offsets.c | 2 ++ arch/powerpc/kvm/book3s_hv.c | 22 ++++++++++++++++++++-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 12 ++++++------ 4 files changed, 31 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 7e2d061d0445..80b0005a7224 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -637,7 +637,9 @@ struct kvm_vcpu_arch { u32 ccr1; u32 dbsr; - u64 mmcr[5]; + u64 mmcr[3]; /* MMCR0, MMCR1, MMCR2 */ + u64 mmcra; + u64 mmcrs; u32 pmc[8]; u32 spmc[2]; u64 siar; diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 6657dc6b2336..6fa4853691de 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -559,6 +559,8 @@ int main(void) OFFSET(VCPU_IRQ_PENDING, kvm_vcpu, arch.irq_pending); OFFSET(VCPU_DBELL_REQ, kvm_vcpu, arch.doorbell_request); OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr); + OFFSET(VCPU_MMCRA, kvm_vcpu, arch.mmcra); + OFFSET(VCPU_MMCRS, kvm_vcpu, arch.mmcrs); OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc); OFFSET(VCPU_SPMC, kvm_vcpu, arch.spmc); OFFSET(VCPU_SIAR, kvm_vcpu, arch.siar); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 6bf66649ab92..b10bb404f0d5 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1679,10 +1679,19 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_UAMOR: *val = get_reg_val(id, vcpu->arch.uamor); break; - case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS: + case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1: i = id - KVM_REG_PPC_MMCR0; *val = get_reg_val(id, vcpu->arch.mmcr[i]); break; + case KVM_REG_PPC_MMCR2: + *val = get_reg_val(id, vcpu->arch.mmcr[2]); + break; + case KVM_REG_PPC_MMCRA: + *val = get_reg_val(id, vcpu->arch.mmcra); + break; + case KVM_REG_PPC_MMCRS: + *val = get_reg_val(id, vcpu->arch.mmcrs); + break; case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8: i = id - KVM_REG_PPC_PMC1; *val = get_reg_val(id, vcpu->arch.pmc[i]); @@ -1900,10 +1909,19 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_UAMOR: vcpu->arch.uamor = set_reg_val(id, *val); break; - case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS: + case KVM_REG_PPC_MMCR0 ... 
KVM_REG_PPC_MMCR1: i = id - KVM_REG_PPC_MMCR0; vcpu->arch.mmcr[i] = set_reg_val(id, *val); break; + case KVM_REG_PPC_MMCR2: + vcpu->arch.mmcr[2] = set_reg_val(id, *val); + break; + case KVM_REG_PPC_MMCRA: + vcpu->arch.mmcra = set_reg_val(id, *val); + break; + case KVM_REG_PPC_MMCRS: + vcpu->arch.mmcrs = set_reg_val(id, *val); + break; case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8: i = id - KVM_REG_PPC_PMC1; vcpu->arch.pmc[i] = set_reg_val(id, *val); diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 71943892c81c..702eaa253042 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -3428,7 +3428,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) mtspr SPRN_PMC6, r9 ld r3, VCPU_MMCR(r4) ld r5, VCPU_MMCR + 8(r4) - ld r6, VCPU_MMCR + 16(r4) + ld r6, VCPU_MMCRA(r4) ld r7, VCPU_SIAR(r4) ld r8, VCPU_SDAR(r4) mtspr SPRN_MMCR1, r5 @@ -3436,14 +3436,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) mtspr SPRN_SIAR, r7 mtspr SPRN_SDAR, r8 BEGIN_FTR_SECTION - ld r5, VCPU_MMCR + 24(r4) + ld r5, VCPU_MMCR + 16(r4) ld r6, VCPU_SIER(r4) mtspr SPRN_MMCR2, r5 mtspr SPRN_SIER, r6 BEGIN_FTR_SECTION_NESTED(96) lwz r7, VCPU_PMC + 24(r4) lwz r8, VCPU_PMC + 28(r4) - ld r9, VCPU_MMCR + 32(r4) + ld r9, VCPU_MMCRS(r4) mtspr SPRN_SPMC1, r7 mtspr SPRN_SPMC2, r8 mtspr SPRN_MMCRS, r9 @@ -3551,9 +3551,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) mfspr r8, SPRN_SDAR std r4, VCPU_MMCR(r9) std r5, VCPU_MMCR + 8(r9) - std r6, VCPU_MMCR + 16(r9) + std r6, VCPU_MMCRA(r9) BEGIN_FTR_SECTION - std r10, VCPU_MMCR + 24(r9) + std r10, VCPU_MMCR + 16(r9) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) std r7, VCPU_SIAR(r9) std r8, VCPU_SDAR(r9) @@ -3578,7 +3578,7 @@ BEGIN_FTR_SECTION_NESTED(96) mfspr r8, SPRN_MMCRS stw r6, VCPU_PMC + 24(r9) stw r7, VCPU_PMC + 28(r9) - std r8, VCPU_MMCR + 32(r9) + std r8, VCPU_MMCRS(r9) lis r4, 0x8000 mtspr SPRN_MMCRS, r4 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) -- cgit v1.2.3 From 9d4fc86dcd510dab5521a6c891f9bf379b85a7e0 Mon Sep 17 00:00:00 2001 From: Athira Rajeev Date: Fri, 17 Jul 2020 10:38:15 -0400 Subject: powerpc/perf: Update Power PMU cache_events to u64 type Events of type PERF_TYPE_HW_CACHE was described for Power PMU as: int (*cache_events)[type][op][result]; where type, op, result values unpacked from the event attribute config value is used to generate the raw event code at runtime. So far the event code values which used to create these cache-related events were within 32 bit and `int` type worked. In power10, some of the event codes are of 64-bit value and hence update the Power PMU cache_events to `u64` type in `power_pmu` struct. Also propagate this change to existing all PMU driver code paths which are using ppmu->cache_events. 
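As an illustration only (not part of the patch): the generic lookup that consumes this table unpacks the PERF_TYPE_HW_CACHE config into (type, op, result) indices and must now hold the looked-up code in a u64. A minimal sketch, assuming the standard perf cache-event config layout (type | op << 8 | result << 16); the helper name is illustrative:

	static int lookup_cache_event(u64 config, u64 *eventp)
	{
		unsigned long type   = config & 0xff;
		unsigned long op     = (config >> 8) & 0xff;
		unsigned long result = (config >> 16) & 0xff;
		u64 ev;

		if (type >= PERF_COUNT_HW_CACHE_MAX ||
		    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
		    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
			return -EINVAL;

		ev = (*ppmu->cache_events)[type][op][result];
		if (ev == 0)		/* 0 means not supported */
			return -EOPNOTSUPP;
		if (ev == (u64)-1)	/* -1 means nonsensical */
			return -EINVAL;

		*eventp = ev;
		return 0;
	}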
Signed-off-by: Athira Rajeev Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1594996707-3727-4-git-send-email-atrajeev@linux.vnet.ibm.com --- arch/powerpc/include/asm/perf_event_server.h | 2 +- arch/powerpc/perf/core-book3s.c | 2 +- arch/powerpc/perf/generic-compat-pmu.c | 2 +- arch/powerpc/perf/mpc7450-pmu.c | 2 +- arch/powerpc/perf/power5+-pmu.c | 2 +- arch/powerpc/perf/power5-pmu.c | 2 +- arch/powerpc/perf/power6-pmu.c | 2 +- arch/powerpc/perf/power7-pmu.c | 2 +- arch/powerpc/perf/power8-pmu.c | 2 +- arch/powerpc/perf/power9-pmu.c | 2 +- arch/powerpc/perf/ppc970-pmu.c | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index f9a36680339d..14b8dc130598 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -53,7 +53,7 @@ struct power_pmu { const struct attribute_group **attr_groups; int n_generic; int *generic_events; - int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] + u64 (*cache_events)[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX]; diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 18b1b6aae692..f4d07b5efd43 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -1790,7 +1790,7 @@ static void hw_perf_event_destroy(struct perf_event *event) static int hw_perf_cache_event(u64 config, u64 *eventp) { unsigned long type, op, result; - int ev; + u64 ev; if (!ppmu->cache_events) return -EINVAL; diff --git a/arch/powerpc/perf/generic-compat-pmu.c b/arch/powerpc/perf/generic-compat-pmu.c index 5e5a54d5588e..eb8a6aaf4cc1 100644 --- a/arch/powerpc/perf/generic-compat-pmu.c +++ b/arch/powerpc/perf/generic-compat-pmu.c @@ -101,7 +101,7 @@ static int compat_generic_events[] = { * 0 means not supported, -1 means nonsensical, other values * are event codes. */ -static int generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { +static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [ C(L1D) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0, diff --git a/arch/powerpc/perf/mpc7450-pmu.c b/arch/powerpc/perf/mpc7450-pmu.c index 826de253aa4d..1919e9df9165 100644 --- a/arch/powerpc/perf/mpc7450-pmu.c +++ b/arch/powerpc/perf/mpc7450-pmu.c @@ -361,7 +361,7 @@ static int mpc7450_generic_events[] = { * 0 means not supported, -1 means nonsensical, other values * are event codes. */ -static int mpc7450_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { +static u64 mpc7450_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0x225 }, [C(OP_WRITE)] = { 0, 0x227 }, diff --git a/arch/powerpc/perf/power5+-pmu.c b/arch/powerpc/perf/power5+-pmu.c index 5f0821e54f04..a62b2cd7914f 100644 --- a/arch/powerpc/perf/power5+-pmu.c +++ b/arch/powerpc/perf/power5+-pmu.c @@ -619,7 +619,7 @@ static int power5p_generic_events[] = { * 0 means not supported, -1 means nonsensical, other values * are event codes. 
*/ -static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { +static u64 power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x1c10a8, 0x3c1088 }, [C(OP_WRITE)] = { 0x2c10a8, 0xc10c3 }, diff --git a/arch/powerpc/perf/power5-pmu.c b/arch/powerpc/perf/power5-pmu.c index 426021d4b44c..8732b587cf71 100644 --- a/arch/powerpc/perf/power5-pmu.c +++ b/arch/powerpc/perf/power5-pmu.c @@ -561,7 +561,7 @@ static int power5_generic_events[] = { * 0 means not supported, -1 means nonsensical, other values * are event codes. */ -static int power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { +static u64 power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x4c1090, 0x3c1088 }, [C(OP_WRITE)] = { 0x3c1090, 0xc10c3 }, diff --git a/arch/powerpc/perf/power6-pmu.c b/arch/powerpc/perf/power6-pmu.c index e343a51a989e..0e318cf87129 100644 --- a/arch/powerpc/perf/power6-pmu.c +++ b/arch/powerpc/perf/power6-pmu.c @@ -481,7 +481,7 @@ static int power6_generic_events[] = { * are event codes. * The "DTLB" and "ITLB" events relate to the DERAT and IERAT. */ -static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { +static u64 power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x280030, 0x80080 }, [C(OP_WRITE)] = { 0x180032, 0x80088 }, diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c index 315233691314..5e0bf09cf077 100644 --- a/arch/powerpc/perf/power7-pmu.c +++ b/arch/powerpc/perf/power7-pmu.c @@ -333,7 +333,7 @@ static int power7_generic_events[] = { * 0 means not supported, -1 means nonsensical, other values * are event codes. */ -static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { +static u64 power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0xc880, 0x400f0 }, [C(OP_WRITE)] = { 0, 0x300f0 }, diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c index 3a5fcc20ff31..5282e8415ddf 100644 --- a/arch/powerpc/perf/power8-pmu.c +++ b/arch/powerpc/perf/power8-pmu.c @@ -253,7 +253,7 @@ static void power8_config_bhrb(u64 pmu_bhrb_filter) * 0 means not supported, -1 means nonsensical, other values * are event codes. */ -static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { +static u64 power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [ C(L1D) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = PM_LD_REF_L1, diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c index 08c3ef796198..05dae38b969a 100644 --- a/arch/powerpc/perf/power9-pmu.c +++ b/arch/powerpc/perf/power9-pmu.c @@ -310,7 +310,7 @@ static void power9_config_bhrb(u64 pmu_bhrb_filter) * 0 means not supported, -1 means nonsensical, other values * are event codes. */ -static int power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { +static u64 power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [ C(L1D) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = PM_LD_REF_L1, diff --git a/arch/powerpc/perf/ppc970-pmu.c b/arch/powerpc/perf/ppc970-pmu.c index 89a90ab6a6c8..d35223fb112c 100644 --- a/arch/powerpc/perf/ppc970-pmu.c +++ b/arch/powerpc/perf/ppc970-pmu.c @@ -432,7 +432,7 @@ static int ppc970_generic_events[] = { * 0 means not supported, -1 means nonsensical, other values * are event codes. 
*/ -static int ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { +static u64 ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x8810, 0x3810 }, [C(OP_WRITE)] = { 0x7810, 0x813 }, -- cgit v1.2.3 From c718547e4a92d74089f862457adf1f617c498e16 Mon Sep 17 00:00:00 2001 From: Madhavan Srinivasan Date: Fri, 17 Jul 2020 10:38:16 -0400 Subject: powerpc/perf: Add support for ISA3.1 PMU SPRs PowerISA v3.1 includes new performance monitoring unit(PMU) special purpose registers (SPRs). They are Monitor Mode Control Register 3 (MMCR3) Sampled Instruction Event Register 2 (SIER2) Sampled Instruction Event Register 3 (SIER3) MMCR3 is added for further sampling related configuration control. SIER2/SIER3 are added to provide additional information about the sampled instruction. Patch adds new PPMU flag called "PPMU_ARCH_31" to support handling of these new SPRs, updates the struct thread_struct to include these new SPRs, include MMCR3 in struct mmcr_regs. This is needed to support programming of MMCR3 SPR during event_enable/disable. Patch also adds the sysfs support for the MMCR3 SPR along with SPRN_ macros for these new pmu SPRs. Signed-off-by: Madhavan Srinivasan [mpe: Rename to PPMU_ARCH_31 as noted by jpn] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1594996707-3727-5-git-send-email-atrajeev@linux.vnet.ibm.com --- arch/powerpc/include/asm/perf_event_server.h | 2 ++ arch/powerpc/include/asm/processor.h | 4 ++++ arch/powerpc/include/asm/reg.h | 6 ++++++ arch/powerpc/kernel/sysfs.c | 8 ++++++++ arch/powerpc/perf/core-book3s.c | 29 ++++++++++++++++++++++++++++ 5 files changed, 49 insertions(+) diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 14b8dc130598..86c9eb064b22 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -22,6 +22,7 @@ struct mmcr_regs { unsigned long mmcr1; unsigned long mmcr2; unsigned long mmcra; + unsigned long mmcr3; }; /* * This struct provides the constants and functions needed to @@ -75,6 +76,7 @@ struct power_pmu { #define PPMU_HAS_SIER 0x00000040 /* Has SIER */ #define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */ #define PPMU_NO_SIAR 0x00000100 /* Do not use SIAR */ +#define PPMU_ARCH_31 0x00000200 /* Has MMCR3, SIER2 and SIER3 */ /* * Values for flags to get_alternatives() diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 6ac12168f1fe..ed0d633ab5aa 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -271,6 +271,10 @@ struct thread_struct { unsigned mmcr0; unsigned used_ebb; + unsigned long mmcr3; + unsigned long sier2; + unsigned long sier3; + #endif }; diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 88e6c78100d9..21a1b2d68121 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -876,7 +876,9 @@ #define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */ #define SPRN_MMCR1 798 #define SPRN_MMCR2 785 +#define SPRN_MMCR3 754 #define SPRN_UMMCR2 769 +#define SPRN_UMMCR3 738 #define SPRN_MMCRA 0x312 #define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */ #define MMCRA_SDAR_DCACHE_MISS 0x40000000UL @@ -918,6 +920,10 @@ #define SIER_SIHV 0x1000000 /* Sampled MSR_HV */ #define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */ #define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid 
*/ +#define SPRN_SIER2 752 +#define SPRN_SIER3 753 +#define SPRN_USIER2 736 +#define SPRN_USIER3 737 #define SPRN_SIAR 796 #define SPRN_SDAR 797 #define SPRN_TACR 888 diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 571b3259697e..46b4ebc33db7 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -622,8 +622,10 @@ SYSFS_PMCSETUP(pmc7, SPRN_PMC7); SYSFS_PMCSETUP(pmc8, SPRN_PMC8); SYSFS_PMCSETUP(mmcra, SPRN_MMCRA); +SYSFS_PMCSETUP(mmcr3, SPRN_MMCR3); static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra); +static DEVICE_ATTR(mmcr3, 0600, show_mmcr3, store_mmcr3); #endif /* HAS_PPC_PMC56 */ @@ -886,6 +888,9 @@ static int register_cpu_online(unsigned int cpu) #ifdef CONFIG_PMU_SYSFS if (cpu_has_feature(CPU_FTR_MMCRA)) device_create_file(s, &dev_attr_mmcra); + + if (cpu_has_feature(CPU_FTR_ARCH_31)) + device_create_file(s, &dev_attr_mmcr3); #endif /* CONFIG_PMU_SYSFS */ if (cpu_has_feature(CPU_FTR_PURR)) { @@ -980,6 +985,9 @@ static int unregister_cpu_online(unsigned int cpu) #ifdef CONFIG_PMU_SYSFS if (cpu_has_feature(CPU_FTR_MMCRA)) device_remove_file(s, &dev_attr_mmcra); + + if (cpu_has_feature(CPU_FTR_ARCH_31)) + device_remove_file(s, &dev_attr_mmcr3); #endif /* CONFIG_PMU_SYSFS */ if (cpu_has_feature(CPU_FTR_PURR)) { diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index f4d07b5efd43..cedd558f3c13 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -72,6 +72,11 @@ static unsigned int freeze_events_kernel = MMCR0_FCS; /* * 32-bit doesn't have MMCRA but does have an MMCR2, * and a few other names are different. + * Also 32-bit doesn't have MMCR3, SIER2 and SIER3. + * Define them as zero knowing that any code path accessing + * these registers (via mtspr/mfspr) are done under ppmu flag + * check for PPMU_ARCH_31 and we will not enter that code path + * for 32-bit. */ #ifdef CONFIG_PPC32 @@ -85,6 +90,9 @@ static unsigned int freeze_events_kernel = MMCR0_FCS; #define MMCR0_PMCC_U6 0 #define SPRN_MMCRA SPRN_MMCR2 +#define SPRN_MMCR3 0 +#define SPRN_SIER2 0 +#define SPRN_SIER3 0 #define MMCRA_SAMPLE_ENABLE 0 static inline unsigned long perf_ip_adjust(struct pt_regs *regs) @@ -581,6 +589,11 @@ static void ebb_switch_out(unsigned long mmcr0) current->thread.sdar = mfspr(SPRN_SDAR); current->thread.mmcr0 = mmcr0 & MMCR0_USER_MASK; current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK; + if (ppmu->flags & PPMU_ARCH_31) { + current->thread.mmcr3 = mfspr(SPRN_MMCR3); + current->thread.sier2 = mfspr(SPRN_SIER2); + current->thread.sier3 = mfspr(SPRN_SIER3); + } } static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw) @@ -620,6 +633,12 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw) * instead manage the MMCR2 entirely by itself. 
*/ mtspr(SPRN_MMCR2, cpuhw->mmcr.mmcr2 | current->thread.mmcr2); + + if (ppmu->flags & PPMU_ARCH_31) { + mtspr(SPRN_MMCR3, current->thread.mmcr3); + mtspr(SPRN_SIER2, current->thread.sier2); + mtspr(SPRN_SIER3, current->thread.sier3); + } out: return mmcr0; } @@ -840,6 +859,11 @@ void perf_event_print_debug(void) pr_info("EBBRR: %016lx BESCR: %016lx\n", mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR)); } + + if (ppmu->flags & PPMU_ARCH_31) { + pr_info("MMCR3: %016lx SIER2: %016lx SIER3: %016lx\n", + mfspr(SPRN_MMCR3), mfspr(SPRN_SIER2), mfspr(SPRN_SIER3)); + } #endif pr_info("SIAR: %016lx SDAR: %016lx SIER: %016lx\n", mfspr(SPRN_SIAR), sdar, sier); @@ -1305,6 +1329,8 @@ static void power_pmu_enable(struct pmu *pmu) if (!cpuhw->n_added) { mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE); mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1); + if (ppmu->flags & PPMU_ARCH_31) + mtspr(SPRN_MMCR3, cpuhw->mmcr.mmcr3); goto out_enable; } @@ -1348,6 +1374,9 @@ static void power_pmu_enable(struct pmu *pmu) if (ppmu->flags & PPMU_ARCH_207S) mtspr(SPRN_MMCR2, cpuhw->mmcr.mmcr2); + if (ppmu->flags & PPMU_ARCH_31) + mtspr(SPRN_MMCR3, cpuhw->mmcr.mmcr3); + /* * Read off any pre-existing events that need to move * to another PMC. -- cgit v1.2.3 From 5752fe0b811bb3cee531c52074921c6dd09dc42d Mon Sep 17 00:00:00 2001 From: Athira Rajeev Date: Fri, 17 Jul 2020 10:38:17 -0400 Subject: KVM: PPC: Book3S HV: Save/restore new PMU registers Power ISA v3.1 has added new performance monitoring unit (PMU) special purpose registers (SPRs). They are: Monitor Mode Control Register 3 (MMCR3) Sampled Instruction Event Register A (SIER2) Sampled Instruction Event Register B (SIER3) Add support to save/restore these new SPRs while entering/exiting guest. Also include changes to support KVM_REG_PPC_MMCR3/SIER2/SIER3. Add new SPRs to KVM API documentation. 
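For illustration only (not part of the patch): with the new one-reg IDs in place, userspace can read or write the added SPRs through the existing KVM_GET_ONE_REG/KVM_SET_ONE_REG interface. A hedged sketch reading the guest MMCR3, assuming an already-created vcpu file descriptor:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <stdint.h>

	/* Sketch: fetch the guest's MMCR3 via the one-reg API. */
	static int read_guest_mmcr3(int vcpu_fd, uint64_t *mmcr3)
	{
		struct kvm_one_reg reg = {
			.id   = KVM_REG_PPC_MMCR3,	/* ID added by this patch */
			.addr = (uintptr_t)mmcr3,
		};

		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}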
Signed-off-by: Athira Rajeev Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1594996707-3727-6-git-send-email-atrajeev@linux.vnet.ibm.com --- Documentation/virt/kvm/api.rst | 3 +++ arch/powerpc/include/asm/kvm_book3s_asm.h | 2 +- arch/powerpc/include/asm/kvm_host.h | 4 ++-- arch/powerpc/include/uapi/asm/kvm.h | 5 +++++ arch/powerpc/kernel/asm-offsets.c | 3 +++ arch/powerpc/kvm/book3s_hv.c | 22 ++++++++++++++++++++-- arch/powerpc/kvm/book3s_hv_interrupts.S | 8 ++++++++ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 24 ++++++++++++++++++++++++ tools/arch/powerpc/include/uapi/asm/kvm.h | 5 +++++ 9 files changed, 71 insertions(+), 5 deletions(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 426f94582b7a..8cb6eb76c129 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -2156,9 +2156,12 @@ registers, find a list below: PPC KVM_REG_PPC_MMCRA 64 PPC KVM_REG_PPC_MMCR2 64 PPC KVM_REG_PPC_MMCRS 64 + PPC KVM_REG_PPC_MMCR3 64 PPC KVM_REG_PPC_SIAR 64 PPC KVM_REG_PPC_SDAR 64 PPC KVM_REG_PPC_SIER 64 + PPC KVM_REG_PPC_SIER2 64 + PPC KVM_REG_PPC_SIER3 64 PPC KVM_REG_PPC_PMC1 32 PPC KVM_REG_PPC_PMC2 32 PPC KVM_REG_PPC_PMC3 32 diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 45704f2716e2..078f4648ea27 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -119,7 +119,7 @@ struct kvmppc_host_state { void __iomem *xive_tima_virt; u32 saved_xirr; u64 dabr; - u64 host_mmcr[7]; /* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER */ + u64 host_mmcr[10]; /* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER, MMCR3, SIER2/3 */ u32 host_pmc[8]; u64 host_purr; u64 host_spurr; diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 80b0005a7224..e020d269416d 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -637,14 +637,14 @@ struct kvm_vcpu_arch { u32 ccr1; u32 dbsr; - u64 mmcr[3]; /* MMCR0, MMCR1, MMCR2 */ + u64 mmcr[4]; /* MMCR0, MMCR1, MMCR2, MMCR3 */ u64 mmcra; u64 mmcrs; u32 pmc[8]; u32 spmc[2]; u64 siar; u64 sdar; - u64 sier; + u64 sier[3]; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM u64 tfhar; u64 texasr; diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 264e266a85bf..c3af3f324c5a 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -640,6 +640,11 @@ struct kvm_ppc_cpu_char { #define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf) #define KVM_REG_PPC_PTCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc0) +/* POWER10 registers */ +#define KVM_REG_PPC_MMCR3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc1) +#define KVM_REG_PPC_SIER2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc2) +#define KVM_REG_PPC_SIER3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc3) + /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs */ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 6fa4853691de..8711c2164b45 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -698,6 +698,9 @@ int main(void) HSTATE_FIELD(HSTATE_SDAR, host_mmcr[4]); HSTATE_FIELD(HSTATE_MMCR2, host_mmcr[5]); HSTATE_FIELD(HSTATE_SIER, host_mmcr[6]); + HSTATE_FIELD(HSTATE_MMCR3, host_mmcr[7]); + HSTATE_FIELD(HSTATE_SIER2, host_mmcr[8]); + HSTATE_FIELD(HSTATE_SIER3, host_mmcr[9]); HSTATE_FIELD(HSTATE_PMC1, host_pmc[0]); HSTATE_FIELD(HSTATE_PMC2, host_pmc[1]); 
HSTATE_FIELD(HSTATE_PMC3, host_pmc[2]); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index b10bb404f0d5..0a0df31987f7 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1692,6 +1692,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_MMCRS: *val = get_reg_val(id, vcpu->arch.mmcrs); break; + case KVM_REG_PPC_MMCR3: + *val = get_reg_val(id, vcpu->arch.mmcr[3]); + break; case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8: i = id - KVM_REG_PPC_PMC1; *val = get_reg_val(id, vcpu->arch.pmc[i]); @@ -1707,7 +1710,13 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, *val = get_reg_val(id, vcpu->arch.sdar); break; case KVM_REG_PPC_SIER: - *val = get_reg_val(id, vcpu->arch.sier); + *val = get_reg_val(id, vcpu->arch.sier[0]); + break; + case KVM_REG_PPC_SIER2: + *val = get_reg_val(id, vcpu->arch.sier[1]); + break; + case KVM_REG_PPC_SIER3: + *val = get_reg_val(id, vcpu->arch.sier[2]); break; case KVM_REG_PPC_IAMR: *val = get_reg_val(id, vcpu->arch.iamr); @@ -1922,6 +1931,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_MMCRS: vcpu->arch.mmcrs = set_reg_val(id, *val); break; + case KVM_REG_PPC_MMCR3: + *val = get_reg_val(id, vcpu->arch.mmcr[3]); + break; case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8: i = id - KVM_REG_PPC_PMC1; vcpu->arch.pmc[i] = set_reg_val(id, *val); @@ -1937,7 +1949,13 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, vcpu->arch.sdar = set_reg_val(id, *val); break; case KVM_REG_PPC_SIER: - vcpu->arch.sier = set_reg_val(id, *val); + vcpu->arch.sier[0] = set_reg_val(id, *val); + break; + case KVM_REG_PPC_SIER2: + vcpu->arch.sier[1] = set_reg_val(id, *val); + break; + case KVM_REG_PPC_SIER3: + vcpu->arch.sier[2] = set_reg_val(id, *val); break; case KVM_REG_PPC_IAMR: vcpu->arch.iamr = set_reg_val(id, *val); diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index 63fd81f3039d..59822cba454d 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S @@ -140,6 +140,14 @@ BEGIN_FTR_SECTION std r8, HSTATE_MMCR2(r13) std r9, HSTATE_SIER(r13) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) +BEGIN_FTR_SECTION + mfspr r5, SPRN_MMCR3 + mfspr r6, SPRN_SIER2 + mfspr r7, SPRN_SIER3 + std r5, HSTATE_MMCR3(r13) + std r6, HSTATE_SIER2(r13) + std r7, HSTATE_SIER3(r13) +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31) mfspr r3, SPRN_PMC1 mfspr r5, SPRN_PMC2 mfspr r6, SPRN_PMC3 diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 702eaa253042..799d6d0f4ead 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -3435,6 +3435,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) mtspr SPRN_MMCRA, r6 mtspr SPRN_SIAR, r7 mtspr SPRN_SDAR, r8 +BEGIN_FTR_SECTION + ld r5, VCPU_MMCR + 24(r4) + ld r6, VCPU_SIER + 8(r4) + ld r7, VCPU_SIER + 16(r4) + mtspr SPRN_MMCR3, r5 + mtspr SPRN_SIER2, r6 + mtspr SPRN_SIER3, r7 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31) BEGIN_FTR_SECTION ld r5, VCPU_MMCR + 16(r4) ld r6, VCPU_SIER(r4) @@ -3496,6 +3504,14 @@ BEGIN_FTR_SECTION mtspr SPRN_MMCR2, r8 mtspr SPRN_SIER, r9 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) +BEGIN_FTR_SECTION + ld r5, HSTATE_MMCR3(r13) + ld r6, HSTATE_SIER2(r13) + ld r7, HSTATE_SIER3(r13) + mtspr SPRN_MMCR3, r5 + mtspr SPRN_SIER2, r6 + mtspr SPRN_SIER3, r7 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31) mtspr SPRN_MMCR0, r3 isync mtlr r0 @@ -3555,6 +3571,14 @@ 
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) BEGIN_FTR_SECTION std r10, VCPU_MMCR + 16(r9) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) +BEGIN_FTR_SECTION + mfspr r5, SPRN_MMCR3 + mfspr r6, SPRN_SIER2 + mfspr r7, SPRN_SIER3 + std r5, VCPU_MMCR + 24(r9) + std r6, VCPU_SIER + 8(r9) + std r7, VCPU_SIER + 16(r9) +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31) std r7, VCPU_SIAR(r9) std r8, VCPU_SDAR(r9) mfspr r3, SPRN_PMC1 diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h index 264e266a85bf..c3af3f324c5a 100644 --- a/tools/arch/powerpc/include/uapi/asm/kvm.h +++ b/tools/arch/powerpc/include/uapi/asm/kvm.h @@ -640,6 +640,11 @@ struct kvm_ppc_cpu_char { #define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf) #define KVM_REG_PPC_PTCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc0) +/* POWER10 registers */ +#define KVM_REG_PPC_MMCR3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc1) +#define KVM_REG_PPC_SIER2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc2) +#define KVM_REG_PPC_SIER3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc3) + /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs */ -- cgit v1.2.3 From 1979ae8c7215718c7a98f038bad0122034ad6529 Mon Sep 17 00:00:00 2001 From: Madhavan Srinivasan Date: Fri, 17 Jul 2020 10:38:18 -0400 Subject: powerpc/xmon: Add PowerISA v3.1 PMU SPRs PowerISA v3.1 added three new perfromance monitoring unit (PMU) speical purpose register (SPR). They are Monitor Mode Control Register 3 (MMCR3), Sampled Instruction Event Register 2 (SIER2), Sampled Instruction Event Register 3 (SIER3). Patch here adds a new dump function dump_310_sprs to print these SPR values. Signed-off-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1594996707-3727-7-git-send-email-atrajeev@linux.vnet.ibm.com --- arch/powerpc/xmon/xmon.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index d27944e38b04..f08b2ef1995a 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -2029,6 +2029,18 @@ static void dump_300_sprs(void) #endif } +static void dump_310_sprs(void) +{ +#ifdef CONFIG_PPC64 + if (!cpu_has_feature(CPU_FTR_ARCH_31)) + return; + + printf("mmcr3 = %.16lx, sier2 = %.16lx, sier3 = %.16lx\n", + mfspr(SPRN_MMCR3), mfspr(SPRN_SIER2), mfspr(SPRN_SIER3)); + +#endif +} + static void dump_one_spr(int spr, bool show_unimplemented) { unsigned long val; @@ -2083,6 +2095,7 @@ static void super_regs(void) dump_206_sprs(); dump_207_sprs(); dump_300_sprs(); + dump_310_sprs(); return; } -- cgit v1.2.3 From 9908c826d5ed150637a3a4c0eec5146a0c438f21 Mon Sep 17 00:00:00 2001 From: Madhavan Srinivasan Date: Fri, 17 Jul 2020 10:38:19 -0400 Subject: powerpc/perf: Add Power10 PMU feature to DT CPU features Add Power10 feature function to DT CPU features, along with a Power10 specific init() to initialize PMU SPRs, sets the oprofile_cpu_type and cpu_features. This will enable performance monitoring unit (PMU) for Power10 in CPU features with "performance-monitor-power10". For Power ISA v3.1, BHRB disable is controlled via Monitor Mode Control Register A (MMCRA) bit, namely "BHRB Recording Disable (BHRBRD)". This patch initializes MMCRA BHRBRD to disable BHRB feature at boot for Power10. 
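For reference, a condensed sketch of what the new init amounts to (the full hunk is in the diff below): reuse the ISA v3.0 setup, clear MMCR3, and start with BHRB recording disabled by setting MMCRA[BHRBRD]:

	static void init_pmu_power10(void)
	{
		init_pmu_power9();	/* MMCR3 and BHRBRD are Power10 additions on top of this */

		mtspr(SPRN_MMCR3, 0);
		mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
	}

BHRB then stays off until a later patch in this series clears the bit at runtime when a user actually requests branch-stack sampling.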
Signed-off-by: Madhavan Srinivasan [mpe: Move MMCRA_BHRB_DISABLE as noted by jpn, drop CPU setup changes] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1594996707-3727-8-git-send-email-atrajeev@linux.vnet.ibm.com --- arch/powerpc/include/asm/reg.h | 1 + arch/powerpc/kernel/dt_cpu_ftrs.c | 26 ++++++++++++++++++++++++++ arch/powerpc/perf/core-book3s.c | 1 + 3 files changed, 28 insertions(+) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 21a1b2d68121..97efbe2973fa 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -888,6 +888,7 @@ #define MMCRA_SLOT 0x07000000UL /* SLOT bits (37-39) */ #define MMCRA_SLOT_SHIFT 24 #define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */ +#define MMCRA_BHRB_DISABLE 0x2000000000UL // BHRB disable bit for ISA v3.1 #define POWER6_MMCRA_SDSYNC 0x0000080000000000ULL /* SDAR/SIAR synced */ #define POWER6_MMCRA_SIHV 0x0000040000000000ULL #define POWER6_MMCRA_SIPR 0x0000020000000000ULL diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 9aa8537f7da2..0d65d9597778 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -450,6 +450,31 @@ static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f) return 1; } +static void init_pmu_power10(void) +{ + init_pmu_power9(); + + mtspr(SPRN_MMCR3, 0); + mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE); +} + +static int __init feat_enable_pmu_power10(struct dt_cpu_feature *f) +{ + hfscr_pmu_enable(); + + init_pmu_power10(); + init_pmu_registers = init_pmu_power10; + + cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA; + cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT; + + cur_cpu_spec->num_pmcs = 6; + cur_cpu_spec->pmc_type = PPC_PMC_IBM; + cur_cpu_spec->oprofile_cpu_type = "ppc64/power10"; + + return 1; +} + static int __init feat_enable_tm(struct dt_cpu_feature *f) { #ifdef CONFIG_PPC_TRANSACTIONAL_MEM @@ -639,6 +664,7 @@ static struct dt_cpu_feature_match __initdata {"pc-relative-addressing", feat_enable, 0}, {"machine-check-power9", feat_enable_mce_power9, 0}, {"performance-monitor-power9", feat_enable_pmu_power9, 0}, + {"performance-monitor-power10", feat_enable_pmu_power10, 0}, {"event-based-branch-v3", feat_enable, 0}, {"random-number-generator", feat_enable, 0}, {"system-call-vectored", feat_disable, 0}, diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index cedd558f3c13..277d975b478c 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -94,6 +94,7 @@ static unsigned int freeze_events_kernel = MMCR0_FCS; #define SPRN_SIER2 0 #define SPRN_SIER3 0 #define MMCRA_SAMPLE_ENABLE 0 +#define MMCRA_BHRB_DISABLE 0 static inline unsigned long perf_ip_adjust(struct pt_regs *regs) { -- cgit v1.2.3 From a64e697cef23b3d24bac700f6d66c8e2bf8efccc Mon Sep 17 00:00:00 2001 From: Athira Rajeev Date: Fri, 17 Jul 2020 10:38:20 -0400 Subject: powerpc/perf: power10 Performance Monitoring support Base enablement patch to register performance monitoring hardware support for power10. Patch introduce the raw event encoding format, defines the supported list of events, config fields for the event attributes and their corresponding bit values which are exported via sysfs. Patch also enhances the support function in isa207_common.c to include power10 pmu hardware. 
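As a usage illustration (not part of the patch): once this PMU is registered, the events defined in power10-events-list.h can be requested by name via the sysfs "events" group or as raw codes. A hedged sketch opening PM_RUN_CYC (raw code 0x600f4, defined below) for the calling thread:

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <string.h>

	/* Sketch: count PM_RUN_CYC for the calling thread on any CPU. */
	static int open_pm_run_cyc(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size   = sizeof(attr);
		attr.type   = PERF_TYPE_RAW;
		attr.config = 0x600f4;		/* PM_RUN_CYC */

		return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	}

From the command line the equivalent would be something like "perf stat -e r600f4", or the named event once the PMU is registered.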
Reported-by: kernel test robot Signed-off-by: Madhavan Srinivasan Signed-off-by: Athira Rajeev Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1594996707-3727-9-git-send-email-atrajeev@linux.vnet.ibm.com --- arch/powerpc/perf/Makefile | 2 +- arch/powerpc/perf/core-book3s.c | 2 + arch/powerpc/perf/internal.h | 1 + arch/powerpc/perf/isa207-common.c | 59 ++++- arch/powerpc/perf/isa207-common.h | 33 ++- arch/powerpc/perf/power10-events-list.h | 70 ++++++ arch/powerpc/perf/power10-pmu.c | 410 ++++++++++++++++++++++++++++++++ 7 files changed, 566 insertions(+), 11 deletions(-) create mode 100644 arch/powerpc/perf/power10-events-list.h create mode 100644 arch/powerpc/perf/power10-pmu.c diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile index 53d614e98537..c02854dea2b2 100644 --- a/arch/powerpc/perf/Makefile +++ b/arch/powerpc/perf/Makefile @@ -9,7 +9,7 @@ obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o bhrb.o obj64-$(CONFIG_PPC_PERF_CTRS) += ppc970-pmu.o power5-pmu.o \ power5+-pmu.o power6-pmu.o power7-pmu.o \ isa207-common.o power8-pmu.o power9-pmu.o \ - generic-compat-pmu.o + generic-compat-pmu.o power10-pmu.o obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o obj-$(CONFIG_PPC_POWERNV) += imc-pmu.o diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 277d975b478c..6bffffa22ee8 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -2333,6 +2333,8 @@ static int __init init_ppc64_pmu(void) return 0; else if (!init_power9_pmu()) return 0; + else if (!init_power10_pmu()) + return 0; else if (!init_ppc970_pmu()) return 0; else diff --git a/arch/powerpc/perf/internal.h b/arch/powerpc/perf/internal.h index f755c64da137..80bbf72bfec2 100644 --- a/arch/powerpc/perf/internal.h +++ b/arch/powerpc/perf/internal.h @@ -9,4 +9,5 @@ extern int init_power6_pmu(void); extern int init_power7_pmu(void); extern int init_power8_pmu(void); extern int init_power9_pmu(void); +extern int init_power10_pmu(void); extern int init_generic_compat_pmu(void); diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 2fe63f2de4b0..77643f3ae464 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -55,7 +55,9 @@ static bool is_event_valid(u64 event) { u64 valid_mask = EVENT_VALID_MASK; - if (cpu_has_feature(CPU_FTR_ARCH_300)) + if (cpu_has_feature(CPU_FTR_ARCH_31)) + valid_mask = p10_EVENT_VALID_MASK; + else if (cpu_has_feature(CPU_FTR_ARCH_300)) valid_mask = p9_EVENT_VALID_MASK; return !(event & ~valid_mask); @@ -69,6 +71,14 @@ static inline bool is_event_marked(u64 event) return false; } +static unsigned long sdar_mod_val(u64 event) +{ + if (cpu_has_feature(CPU_FTR_ARCH_31)) + return p10_SDAR_MODE(event); + + return p9_SDAR_MODE(event); +} + static void mmcra_sdar_mode(u64 event, unsigned long *mmcra) { /* @@ -79,7 +89,7 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra) * MMCRA[SDAR_MODE] will be programmed as "0b01" for continous sampling * mode and will be un-changed when setting MMCRA[63] (Marked events). * - * Incase of Power9: + * Incase of Power9/power10: * Marked event: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates'), * or if group already have any marked events. 
* For rest @@ -90,8 +100,8 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra) if (cpu_has_feature(CPU_FTR_ARCH_300)) { if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE)) *mmcra &= MMCRA_SDAR_MODE_NO_UPDATES; - else if (p9_SDAR_MODE(event)) - *mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT; + else if (sdar_mod_val(event)) + *mmcra |= sdar_mod_val(event) << MMCRA_SDAR_MODE_SHIFT; else *mmcra |= MMCRA_SDAR_MODE_DCACHE; } else @@ -134,7 +144,11 @@ static bool is_thresh_cmp_valid(u64 event) /* * Check the mantissa upper two bits are not zero, unless the * exponent is also zero. See the THRESH_CMP_MANTISSA doc. + * Power10: thresh_cmp is replaced by l2_l3 event select. */ + if (cpu_has_feature(CPU_FTR_ARCH_31)) + return false; + cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK; exp = cmp >> 7; @@ -251,7 +265,12 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK; - cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK; + if (cpu_has_feature(CPU_FTR_ARCH_31)) + cache = (event >> EVENT_CACHE_SEL_SHIFT) & + p10_EVENT_CACHE_SEL_MASK; + else + cache = (event >> EVENT_CACHE_SEL_SHIFT) & + EVENT_CACHE_SEL_MASK; ebb = (event >> EVENT_EBB_SHIFT) & EVENT_EBB_MASK; if (pmc) { @@ -283,7 +302,10 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) } if (unit >= 6 && unit <= 9) { - if (cpu_has_feature(CPU_FTR_ARCH_300)) { + if (cpu_has_feature(CPU_FTR_ARCH_31) && (unit == 6)) { + mask |= CNST_L2L3_GROUP_MASK; + value |= CNST_L2L3_GROUP_VAL(event >> p10_L2L3_EVENT_SHIFT); + } else if (cpu_has_feature(CPU_FTR_ARCH_300)) { mask |= CNST_CACHE_GROUP_MASK; value |= CNST_CACHE_GROUP_VAL(event & 0xff); @@ -367,6 +389,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev, struct perf_event *pevents[]) { unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val; + unsigned long mmcr3; unsigned int pmc, pmc_inuse; int i; @@ -379,7 +402,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev, pmc_inuse |= 1 << pmc; } - mmcra = mmcr1 = mmcr2 = 0; + mmcra = mmcr1 = mmcr2 = mmcr3 = 0; /* Second pass: assign PMCs, set all MMCR1 fields */ for (i = 0; i < n_ev; ++i) { @@ -438,8 +461,17 @@ int isa207_compute_mmcr(u64 event[], int n_ev, mmcra |= val << MMCRA_THR_CTL_SHIFT; val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK; mmcra |= val << MMCRA_THR_SEL_SHIFT; - val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK; - mmcra |= thresh_cmp_val(val); + if (!cpu_has_feature(CPU_FTR_ARCH_31)) { + val = (event[i] >> EVENT_THR_CMP_SHIFT) & + EVENT_THR_CMP_MASK; + mmcra |= thresh_cmp_val(val); + } + } + + if (cpu_has_feature(CPU_FTR_ARCH_31) && (unit == 6)) { + val = (event[i] >> p10_L2L3_EVENT_SHIFT) & + p10_EVENT_L2L3_SEL_MASK; + mmcr2 |= val << p10_L2L3_SEL_SHIFT; } if (event[i] & EVENT_WANTS_BHRB) { @@ -460,6 +492,14 @@ int isa207_compute_mmcr(u64 event[], int n_ev, mmcr2 |= MMCR2_FCS(pmc); } + if (cpu_has_feature(CPU_FTR_ARCH_31)) { + if (pmc <= 4) { + val = (event[i] >> p10_EVENT_MMCR3_SHIFT) & + p10_EVENT_MMCR3_MASK; + mmcr3 |= val << MMCR3_SHIFT(pmc); + } + } + hwc[i] = pmc - 1; } @@ -480,6 +520,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev, mmcr->mmcr1 = mmcr1; mmcr->mmcra = mmcra; mmcr->mmcr2 = mmcr2; + mmcr->mmcr3 = mmcr3; return 0; } diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h index df968fde65a1..044de65e96b9 100644 --- 
a/arch/powerpc/perf/isa207-common.h +++ b/arch/powerpc/perf/isa207-common.h @@ -87,6 +87,31 @@ EVENT_LINUX_MASK | \ EVENT_PSEL_MASK)) +/* Contants to support power10 raw encoding format */ +#define p10_SDAR_MODE_SHIFT 22 +#define p10_SDAR_MODE_MASK 0x3ull +#define p10_SDAR_MODE(v) (((v) >> p10_SDAR_MODE_SHIFT) & \ + p10_SDAR_MODE_MASK) +#define p10_EVENT_L2L3_SEL_MASK 0x1f +#define p10_L2L3_SEL_SHIFT 3 +#define p10_L2L3_EVENT_SHIFT 40 +#define p10_EVENT_THRESH_MASK 0xffffull +#define p10_EVENT_CACHE_SEL_MASK 0x3ull +#define p10_EVENT_MMCR3_MASK 0x7fffull +#define p10_EVENT_MMCR3_SHIFT 45 + +#define p10_EVENT_VALID_MASK \ + ((p10_SDAR_MODE_MASK << p10_SDAR_MODE_SHIFT | \ + (p10_EVENT_THRESH_MASK << EVENT_THRESH_SHIFT) | \ + (EVENT_SAMPLE_MASK << EVENT_SAMPLE_SHIFT) | \ + (p10_EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT) | \ + (EVENT_PMC_MASK << EVENT_PMC_SHIFT) | \ + (EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \ + (p9_EVENT_COMBINE_MASK << p9_EVENT_COMBINE_SHIFT) | \ + (p10_EVENT_MMCR3_MASK << p10_EVENT_MMCR3_SHIFT) | \ + (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \ + EVENT_LINUX_MASK | \ + EVENT_PSEL_MASK)) /* * Layout of constraint bits: * @@ -135,6 +160,9 @@ #define CNST_CACHE_PMC4_VAL (1ull << 54) #define CNST_CACHE_PMC4_MASK CNST_CACHE_PMC4_VAL +#define CNST_L2L3_GROUP_VAL(v) (((v) & 0x1full) << 55) +#define CNST_L2L3_GROUP_MASK CNST_L2L3_GROUP_VAL(0x1f) + /* * For NC we are counting up to 4 events. This requires three bits, and we need * the fifth event to overflow and set the 4th bit. To achieve that we bias the @@ -191,7 +219,7 @@ #define MMCRA_THR_CTR_EXP(v) (((v) >> MMCRA_THR_CTR_EXP_SHIFT) &\ MMCRA_THR_CTR_EXP_MASK) -/* MMCR1 Threshold Compare bit constant for power9 */ +/* MMCRA Threshold Compare bit constant for power9 */ #define p9_MMCRA_THR_CMP_SHIFT 45 /* Bits in MMCR2 for PowerISA v2.07 */ @@ -202,6 +230,9 @@ #define MAX_ALT 2 #define MAX_PMU_COUNTERS 6 +/* Bits in MMCR3 for PowerISA v3.10 */ +#define MMCR3_SHIFT(pmc) (49 - (15 * ((pmc) - 1))) + #define ISA207_SIER_TYPE_SHIFT 15 #define ISA207_SIER_TYPE_MASK (0x7ull << ISA207_SIER_TYPE_SHIFT) diff --git a/arch/powerpc/perf/power10-events-list.h b/arch/powerpc/perf/power10-events-list.h new file mode 100644 index 000000000000..60c1b8111082 --- /dev/null +++ b/arch/powerpc/perf/power10-events-list.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Performance counter support for POWER10 processors. + * + * Copyright 2020 Madhavan Srinivasan, IBM Corporation. + * Copyright 2020 Athira Rajeev, IBM Corporation. + */ + +/* + * Power10 event codes. 
+ */ +EVENT(PM_RUN_CYC, 0x600f4); +EVENT(PM_DISP_STALL_CYC, 0x100f8); +EVENT(PM_EXEC_STALL, 0x30008); +EVENT(PM_RUN_INST_CMPL, 0x500fa); +EVENT(PM_BR_CMPL, 0x4d05e); +EVENT(PM_BR_MPRED_CMPL, 0x400f6); + +/* All L1 D cache load references counted at finish, gated by reject */ +EVENT(PM_LD_REF_L1, 0x100fc); +/* Load Missed L1 */ +EVENT(PM_LD_MISS_L1, 0x3e054); +/* Store Missed L1 */ +EVENT(PM_ST_MISS_L1, 0x300f0); +/* L1 cache data prefetches */ +EVENT(PM_LD_PREFETCH_CACHE_LINE_MISS, 0x1002c); +/* Demand iCache Miss */ +EVENT(PM_L1_ICACHE_MISS, 0x200fc); +/* Instruction fetches from L1 */ +EVENT(PM_INST_FROM_L1, 0x04080); +/* Instruction Demand sectors wriittent into IL1 */ +EVENT(PM_INST_FROM_L1MISS, 0x03f00000001c040); +/* Instruction prefetch written into IL1 */ +EVENT(PM_IC_PREF_REQ, 0x040a0); +/* The data cache was reloaded from local core's L3 due to a demand load */ +EVENT(PM_DATA_FROM_L3, 0x01340000001c040); +/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */ +EVENT(PM_DATA_FROM_L3MISS, 0x300fe); +/* Data PTEG reload */ +EVENT(PM_DTLB_MISS, 0x300fc); +/* ITLB Reloaded */ +EVENT(PM_ITLB_MISS, 0x400fc); + +EVENT(PM_RUN_CYC_ALT, 0x0001e); +EVENT(PM_RUN_INST_CMPL_ALT, 0x00002); + +/* + * Memory Access Events + * + * Primary PMU event used here is PM_MRK_INST_CMPL (0x401e0) + * To enable capturing of memory profiling, these MMCRA bits + * needs to be programmed and corresponding raw event format + * encoding. + * + * MMCRA bits encoding needed are + * SM (Sampling Mode) + * EM (Eligibility for Random Sampling) + * TECE (Threshold Event Counter Event) + * TS (Threshold Start Event) + * TE (Threshold End Event) + * + * Corresponding Raw Encoding bits: + * sample [EM,SM] + * thresh_sel (TECE) + * thresh start (TS) + * thresh end (TE) + */ + +EVENT(MEM_LOADS, 0x34340401e0); +EVENT(MEM_STORES, 0x343c0401e0); diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c new file mode 100644 index 000000000000..4b65e95c1e01 --- /dev/null +++ b/arch/powerpc/perf/power10-pmu.c @@ -0,0 +1,410 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Performance counter support for POWER10 processors. + * + * Copyright 2020 Madhavan Srinivasan, IBM Corporation. + * Copyright 2020 Athira Rajeev, IBM Corporation. + */ + +#define pr_fmt(fmt) "power10-pmu: " fmt + +#include "isa207-common.h" +#include "internal.h" + +/* + * Raw event encoding for Power10: + * + * 60 56 52 48 44 40 36 32 + * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | + * | | [ ] [ src_match ] [ src_mask ] | [ ] [ l2l3_sel ] [ thresh_ctl ] + * | | | | | | + * | | *- IFM (Linux) | | thresh start/stop -* + * | *- BHRB (Linux) | src_sel + * *- EBB (Linux) *invert_bit + * + * 28 24 20 16 12 8 4 0 + * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | + * [ ] [ sample ] [ ] [ ] [ pmc ] [unit ] [ ] m [ pmcxsel ] + * | | | | | | + * | | | | | *- mark + * | | | *- L1/L2/L3 cache_sel | + * | | sdar_mode | + * | *- sampling mode for marked events *- combine + * | + * *- thresh_sel + * + * Below uses IBM bit numbering. 
+ * + * MMCR1[x:y] = unit (PMCxUNIT) + * MMCR1[24] = pmc1combine[0] + * MMCR1[25] = pmc1combine[1] + * MMCR1[26] = pmc2combine[0] + * MMCR1[27] = pmc2combine[1] + * MMCR1[28] = pmc3combine[0] + * MMCR1[29] = pmc3combine[1] + * MMCR1[30] = pmc4combine[0] + * MMCR1[31] = pmc4combine[1] + * + * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011 + * MMCR1[20:27] = thresh_ctl + * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001 + * MMCR1[20:27] = thresh_ctl + * else + * MMCRA[48:55] = thresh_ctl (THRESH START/END) + * + * if thresh_sel: + * MMCRA[45:47] = thresh_sel + * + * if l2l3_sel: + * MMCR2[56:60] = l2l3_sel[0:4] + * + * MMCR1[16] = cache_sel[0] + * MMCR1[17] = cache_sel[1] + * + * if mark: + * MMCRA[63] = 1 (SAMPLE_ENABLE) + * MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG) + * MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE) + * + * if EBB and BHRB: + * MMCRA[32:33] = IFM + * + * MMCRA[SDAR_MODE] = sdar_mode[0:1] + */ + +/* + * Some power10 event codes. + */ +#define EVENT(_name, _code) enum{_name = _code} + +#include "power10-events-list.h" + +#undef EVENT + +/* MMCRA IFM bits - POWER10 */ +#define POWER10_MMCRA_IFM1 0x0000000040000000UL +#define POWER10_MMCRA_BHRB_MASK 0x00000000C0000000UL + +/* Table of alternatives, sorted by column 0 */ +static const unsigned int power10_event_alternatives[][MAX_ALT] = { + { PM_RUN_CYC_ALT, PM_RUN_CYC }, + { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL }, +}; + +static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[]) +{ + int num_alt = 0; + + num_alt = isa207_get_alternatives(event, alt, + ARRAY_SIZE(power10_event_alternatives), flags, + power10_event_alternatives); + + return num_alt; +} + +GENERIC_EVENT_ATTR(cpu-cycles, PM_RUN_CYC); +GENERIC_EVENT_ATTR(instructions, PM_RUN_INST_CMPL); +GENERIC_EVENT_ATTR(branch-instructions, PM_BR_CMPL); +GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL); +GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1); +GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1); +GENERIC_EVENT_ATTR(mem-loads, MEM_LOADS); +GENERIC_EVENT_ATTR(mem-stores, MEM_STORES); + +CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1); +CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1); +CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_LD_PREFETCH_CACHE_LINE_MISS); +CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1); +CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS); +CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1); +CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_REQ); +CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS); +CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3); +CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL); +CACHE_EVENT_ATTR(branch-loads, PM_BR_CMPL); +CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS); +CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS); + +static struct attribute *power10_events_attr[] = { + GENERIC_EVENT_PTR(PM_RUN_CYC), + GENERIC_EVENT_PTR(PM_RUN_INST_CMPL), + GENERIC_EVENT_PTR(PM_BR_CMPL), + GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL), + GENERIC_EVENT_PTR(PM_LD_REF_L1), + GENERIC_EVENT_PTR(PM_LD_MISS_L1), + GENERIC_EVENT_PTR(MEM_LOADS), + GENERIC_EVENT_PTR(MEM_STORES), + CACHE_EVENT_PTR(PM_LD_MISS_L1), + CACHE_EVENT_PTR(PM_LD_REF_L1), + CACHE_EVENT_PTR(PM_LD_PREFETCH_CACHE_LINE_MISS), + CACHE_EVENT_PTR(PM_ST_MISS_L1), + CACHE_EVENT_PTR(PM_L1_ICACHE_MISS), + CACHE_EVENT_PTR(PM_INST_FROM_L1), + CACHE_EVENT_PTR(PM_IC_PREF_REQ), + CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS), + CACHE_EVENT_PTR(PM_DATA_FROM_L3), + CACHE_EVENT_PTR(PM_BR_MPRED_CMPL), + CACHE_EVENT_PTR(PM_BR_CMPL), + 
CACHE_EVENT_PTR(PM_DTLB_MISS), + CACHE_EVENT_PTR(PM_ITLB_MISS), + NULL +}; + +static struct attribute_group power10_pmu_events_group = { + .name = "events", + .attrs = power10_events_attr, +}; + +PMU_FORMAT_ATTR(event, "config:0-59"); +PMU_FORMAT_ATTR(pmcxsel, "config:0-7"); +PMU_FORMAT_ATTR(mark, "config:8"); +PMU_FORMAT_ATTR(combine, "config:10-11"); +PMU_FORMAT_ATTR(unit, "config:12-15"); +PMU_FORMAT_ATTR(pmc, "config:16-19"); +PMU_FORMAT_ATTR(cache_sel, "config:20-21"); +PMU_FORMAT_ATTR(sdar_mode, "config:22-23"); +PMU_FORMAT_ATTR(sample_mode, "config:24-28"); +PMU_FORMAT_ATTR(thresh_sel, "config:29-31"); +PMU_FORMAT_ATTR(thresh_stop, "config:32-35"); +PMU_FORMAT_ATTR(thresh_start, "config:36-39"); +PMU_FORMAT_ATTR(l2l3_sel, "config:40-44"); +PMU_FORMAT_ATTR(src_sel, "config:45-46"); +PMU_FORMAT_ATTR(invert_bit, "config:47"); +PMU_FORMAT_ATTR(src_mask, "config:48-53"); +PMU_FORMAT_ATTR(src_match, "config:54-59"); + +static struct attribute *power10_pmu_format_attr[] = { + &format_attr_event.attr, + &format_attr_pmcxsel.attr, + &format_attr_mark.attr, + &format_attr_combine.attr, + &format_attr_unit.attr, + &format_attr_pmc.attr, + &format_attr_cache_sel.attr, + &format_attr_sdar_mode.attr, + &format_attr_sample_mode.attr, + &format_attr_thresh_sel.attr, + &format_attr_thresh_stop.attr, + &format_attr_thresh_start.attr, + &format_attr_l2l3_sel.attr, + &format_attr_src_sel.attr, + &format_attr_invert_bit.attr, + &format_attr_src_mask.attr, + &format_attr_src_match.attr, + NULL, +}; + +static struct attribute_group power10_pmu_format_group = { + .name = "format", + .attrs = power10_pmu_format_attr, +}; + +static const struct attribute_group *power10_pmu_attr_groups[] = { + &power10_pmu_format_group, + &power10_pmu_events_group, + NULL, +}; + +static int power10_generic_events[] = { + [PERF_COUNT_HW_CPU_CYCLES] = PM_RUN_CYC, + [PERF_COUNT_HW_INSTRUCTIONS] = PM_RUN_INST_CMPL, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_CMPL, + [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL, + [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1, + [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1, +}; + +static u64 power10_bhrb_filter_map(u64 branch_sample_type) +{ + u64 pmu_bhrb_filter = 0; + + /* BHRB and regular PMU events share the same privilege state + * filter configuration. BHRB is always recorded along with a + * regular PMU event. As the privilege state filter is handled + * in the basic PMC configuration of the accompanying regular + * PMU event, we ignore any separate BHRB specific request. + */ + + /* No branch filter requested */ + if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY) + return pmu_bhrb_filter; + + /* Invalid branch filter options - HW does not support */ + if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN) + return -1; + + if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL) + return -1; + + if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL) + return -1; + + if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) { + pmu_bhrb_filter |= POWER10_MMCRA_IFM1; + return pmu_bhrb_filter; + } + + /* Every thing else is unsupported */ + return -1; +} + +static void power10_config_bhrb(u64 pmu_bhrb_filter) +{ + pmu_bhrb_filter &= POWER10_MMCRA_BHRB_MASK; + + /* Enable BHRB filter in PMU */ + mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter)); +} + +#define C(x) PERF_COUNT_HW_CACHE_##x + +/* + * Table of generalized cache-related events. + * 0 means not supported, -1 means nonsensical, other values + * are event codes. 
+ */ +static u64 power10_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = PM_LD_REF_L1, + [C(RESULT_MISS)] = PM_LD_MISS_L1, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0, + [C(RESULT_MISS)] = PM_ST_MISS_L1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = PM_LD_PREFETCH_CACHE_LINE_MISS, + [C(RESULT_MISS)] = 0, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = PM_INST_FROM_L1, + [C(RESULT_MISS)] = PM_L1_ICACHE_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = PM_INST_FROM_L1MISS, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = PM_IC_PREF_REQ, + [C(RESULT_MISS)] = 0, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = PM_DATA_FROM_L3, + [C(RESULT_MISS)] = PM_DATA_FROM_L3MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = 0, + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0, + [C(RESULT_MISS)] = PM_DTLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0, + [C(RESULT_MISS)] = PM_ITLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = PM_BR_CMPL, + [C(RESULT_MISS)] = PM_BR_MPRED_CMPL, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + }, +}; + +#undef C + +static struct power_pmu power10_pmu = { + .name = "POWER10", + .n_counter = MAX_PMU_COUNTERS, + .add_fields = ISA207_ADD_FIELDS, + .test_adder = ISA207_TEST_ADDER, + .group_constraint_mask = CNST_CACHE_PMC4_MASK, + .group_constraint_val = CNST_CACHE_PMC4_VAL, + .compute_mmcr = isa207_compute_mmcr, + .config_bhrb = power10_config_bhrb, + .bhrb_filter_map = power10_bhrb_filter_map, + .get_constraint = isa207_get_constraint, + .get_alternatives = power10_get_alternatives, + .get_mem_data_src = isa207_get_mem_data_src, + .get_mem_weight = isa207_get_mem_weight, + .disable_pmc = isa207_disable_pmc, + .flags = PPMU_HAS_SIER | PPMU_ARCH_207S | + PPMU_ARCH_31, + .n_generic = ARRAY_SIZE(power10_generic_events), + .generic_events = power10_generic_events, + .cache_events = &power10_cache_events, + .attr_groups = power10_pmu_attr_groups, + .bhrb_nr = 32, +}; + +int init_power10_pmu(void) +{ + int rc; + + /* Comes from cpu_specs[] */ + if (!cur_cpu_spec->oprofile_cpu_type || + strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power10")) + return -ENODEV; + + rc = register_power_pmu(&power10_pmu); + if (rc) + return rc; + + /* Tell userspace that EBB is supported */ + cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB; + + return 0; +} -- cgit v1.2.3 From bfe3b1945d5e0531103b3d4ab3a367a1a156d99a Mon Sep 17 00:00:00 2001 From: Athira Rajeev Date: Fri, 17 Jul 2020 10:38:21 -0400 Subject: powerpc/perf: Ignore the BHRB kernel address filtering for P10 Commit bb19af816025 ("powerpc/perf: Prevent 
kernel address leak to userspace via BHRB buffer") added a check in bhrb_read() to filter the kernel address from BHRB buffer. This patch modified it to avoid that check for PowerISA v3.1 based processors, since PowerISA v3.1 allows only MSR[PR]=1 address to be written to BHRB buffer. Signed-off-by: Athira Rajeev Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1594996707-3727-10-git-send-email-atrajeev@linux.vnet.ibm.com --- arch/powerpc/perf/core-book3s.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 6bffffa22ee8..e31629fb6e7e 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -470,8 +470,11 @@ static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events * * addresses at this point. Check the privileges before * exporting it to userspace (avoid exposure of regions * where we could have speculative execution) + * Incase of ISA v3.1, BHRB will capture only user-space + * addresses, hence include a check before filtering code */ - if (is_kernel_addr(addr) && perf_allow_kernel(&event->attr) != 0) + if (!(ppmu->flags & PPMU_ARCH_31) && + is_kernel_addr(addr) && perf_allow_kernel(&event->attr) != 0) continue; /* Branches are read most recent first (ie. mfbhrb 0 is -- cgit v1.2.3 From 80350a4bac992e3404067d31ff901ae9ff76aaa8 Mon Sep 17 00:00:00 2001 From: Athira Rajeev Date: Fri, 17 Jul 2020 10:38:22 -0400 Subject: powerpc/perf: Add Power10 BHRB filter support for PERF_SAMPLE_BRANCH_IND_CALL/COND PowerISA v3.1 introduce filtering support for PERF_SAMPLE_BRANCH_IND_CALL/COND. The patch adds BHRB filter support for "ind_call" and "cond" in power10_bhrb_filter_map(). Signed-off-by: Athira Rajeev Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1594996707-3727-11-git-send-email-atrajeev@linux.vnet.ibm.com --- arch/powerpc/perf/power10-pmu.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c index 4b65e95c1e01..f7cff7f36a1c 100644 --- a/arch/powerpc/perf/power10-pmu.c +++ b/arch/powerpc/perf/power10-pmu.c @@ -83,6 +83,8 @@ /* MMCRA IFM bits - POWER10 */ #define POWER10_MMCRA_IFM1 0x0000000040000000UL +#define POWER10_MMCRA_IFM2 0x0000000080000000UL +#define POWER10_MMCRA_IFM3 0x00000000C0000000UL #define POWER10_MMCRA_BHRB_MASK 0x00000000C0000000UL /* Table of alternatives, sorted by column 0 */ @@ -233,8 +235,15 @@ static u64 power10_bhrb_filter_map(u64 branch_sample_type) if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN) return -1; - if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL) - return -1; + if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL) { + pmu_bhrb_filter |= POWER10_MMCRA_IFM2; + return pmu_bhrb_filter; + } + + if (branch_sample_type & PERF_SAMPLE_BRANCH_COND) { + pmu_bhrb_filter |= POWER10_MMCRA_IFM3; + return pmu_bhrb_filter; + } if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL) return -1; -- cgit v1.2.3 From 1cade527f6e9bec6a6412d0641643c359ada8096 Mon Sep 17 00:00:00 2001 From: Athira Rajeev Date: Fri, 17 Jul 2020 10:38:23 -0400 Subject: powerpc/perf: BHRB control to disable BHRB logic when not used PowerISA v3.1 has few updates for the Branch History Rolling Buffer(BHRB). BHRB disable is controlled via Monitor Mode Control Register A (MMCRA) bit, namely "BHRB Recording Disable (BHRBRD)". This field controls whether BHRB entries are written when BHRB recording is enabled by other bits. 
This patch implements support for this BHRB disable bit. By setting 0b1 to this bit will disable the BHRB and by setting 0b0 to this bit will have BHRB enabled. This addresses backward compatibility (for older OS), since this bit will be cleared and hardware will be writing to BHRB by default. This patch addresses changes to set MMCRA (BHRBRD) at boot for power10 (there by the core will run faster) and enable this feature only on runtime ie, on explicit need from user. Also save/restore MMCRA in the restore path of state-loss idle state to make sure we keep BHRB disabled if it was not enabled on request at runtime. Signed-off-by: Athira Rajeev Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1594996707-3727-12-git-send-email-atrajeev@linux.vnet.ibm.com --- arch/powerpc/perf/core-book3s.c | 20 ++++++++++++++++---- arch/powerpc/perf/isa207-common.c | 12 ++++++++++++ arch/powerpc/platforms/powernv/idle.c | 22 ++++++++++++++++++++-- 3 files changed, 48 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index e31629fb6e7e..e29c846bf35d 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -1219,7 +1219,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) static void power_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuhw; - unsigned long flags, mmcr0, val; + unsigned long flags, mmcr0, val, mmcra; if (!ppmu) return; @@ -1252,12 +1252,24 @@ static void power_pmu_disable(struct pmu *pmu) mb(); isync(); + val = mmcra = cpuhw->mmcr.mmcra; + /* * Disable instruction sampling if it was enabled */ - if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE) { - mtspr(SPRN_MMCRA, - cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE); + if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE) + val &= ~MMCRA_SAMPLE_ENABLE; + + /* Disable BHRB via mmcra (BHRBRD) for p10 */ + if (ppmu->flags & PPMU_ARCH_31) + val |= MMCRA_BHRB_DISABLE; + + /* + * Write SPRN_MMCRA if mmcra has either disabled + * instruction sampling or BHRB. + */ + if (val != mmcra) { + mtspr(SPRN_MMCRA, mmcra); mb(); isync(); } diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 77643f3ae464..964437adec18 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -404,6 +404,13 @@ int isa207_compute_mmcr(u64 event[], int n_ev, mmcra = mmcr1 = mmcr2 = mmcr3 = 0; + /* + * Disable bhrb unless explicitly requested + * by setting MMCRA (BHRBRD) bit. 
+ */ + if (cpu_has_feature(CPU_FTR_ARCH_31)) + mmcra |= MMCRA_BHRB_DISABLE; + /* Second pass: assign PMCs, set all MMCR1 fields */ for (i = 0; i < n_ev; ++i) { pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; @@ -479,6 +486,11 @@ int isa207_compute_mmcr(u64 event[], int n_ev, mmcra |= val << MMCRA_IFM_SHIFT; } + /* set MMCRA (BHRBRD) to 0 if there is user request for BHRB */ + if (cpu_has_feature(CPU_FTR_ARCH_31) && + (has_branch_stack(pevents[i]) || (event[i] & EVENT_WANTS_BHRB))) + mmcra &= ~MMCRA_BHRB_DISABLE; + if (pevents[i]->attr.exclude_user) mmcr2 |= MMCR2_FCP(pmc); diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 2dd467383a88..1c9d0a9d50e0 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -611,6 +611,7 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on) unsigned long srr1; unsigned long pls; unsigned long mmcr0 = 0; + unsigned long mmcra = 0; struct p9_sprs sprs = {}; /* avoid false used-uninitialised */ bool sprs_saved = false; @@ -657,6 +658,21 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on) */ mmcr0 = mfspr(SPRN_MMCR0); } + + if (cpu_has_feature(CPU_FTR_ARCH_31)) { + /* + * POWER10 uses MMCRA (BHRBRD) as BHRB disable bit. + * If the user hasn't asked for the BHRB to be + * written, the value of MMCRA[BHRBRD] is 1. + * On wakeup from stop, MMCRA[BHRBD] will be 0, + * since it is previleged resource and will be lost. + * Thus, if we do not save and restore the MMCRA[BHRBD], + * hardware will be needlessly writing to the BHRB + * in problem mode. + */ + mmcra = mfspr(SPRN_MMCRA); + } + if ((psscr & PSSCR_RL_MASK) >= pnv_first_spr_loss_level) { sprs.lpcr = mfspr(SPRN_LPCR); sprs.hfscr = mfspr(SPRN_HFSCR); @@ -700,8 +716,6 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on) WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR)); if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) { - unsigned long mmcra; - /* * We don't need an isync after the mtsprs here because the * upcoming mtmsrd is execution synchronizing. @@ -721,6 +735,10 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on) mtspr(SPRN_MMCR0, mmcr0); } + /* Reload MMCRA to restore BHRB disable bit for POWER10 */ + if (cpu_has_feature(CPU_FTR_ARCH_31)) + mtspr(SPRN_MMCRA, mmcra); + /* * DD2.2 and earlier need to set then clear bit 60 in MMCRA * to ensure the PMU starts running. -- cgit v1.2.3 From b2dc2977cba48990df45e0a96150663d4f342700 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 11 Jun 2020 18:12:02 +1000 Subject: powerpc/64s/exception: treat NIA below __end_interrupts as soft-masked The scv instruction causes an interrupt which can enter the kernel with MSR[EE]=1, thus allowing interrupts to hit at any time. These must not be taken as normal interrupts, because they come from MSR[PR]=0 context, and yet the kernel stack is not yet set up and r13 is not set to the PACA). Treat this as a soft-masked interrupt regardless of the soft masked state. This does not affect behaviour yet, because currently all interrupts are taken with MSR[EE]=0. 
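Expressed in C rather than assembly, the test added to __GEN_COMMON_BODY below amounts to
the following. This is a simplified, illustrative restatement only, not code from this
patch; __end_interrupts, MSR_PR and paca->irq_soft_mask are the kernel symbols involved:

  static bool interrupt_is_soft_masked(struct pt_regs *regs, unsigned char imask)
  {
          if (regs->msr & MSR_PR)
                  return false;   /* coming from user: skip the soft-mask tests */

          /* kernel text below __end_interrupts is implicitly soft-masked */
          if (regs->nip < (unsigned long)__end_interrupts)
                  return imask != 0;

          /* otherwise consult the usual per-CPU soft mask */
          return (local_paca->irq_soft_mask & imask) != 0;
  }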
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200611081203.995112-2-npiggin@gmail.com --- arch/powerpc/kernel/exceptions-64s.S | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index fa080694e581..f15307c50bc1 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -508,8 +508,24 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real) .macro __GEN_COMMON_BODY name .if IMASK + .if ! ISTACK + .error "No support for masked interrupt to use custom stack" + .endif + + /* If coming from user, skip soft-mask tests. */ + andi. r10,r12,MSR_PR + bne 2f + + /* Kernel code running below __end_interrupts is implicitly + * soft-masked */ + LOAD_HANDLER(r10, __end_interrupts) + cmpld r11,r10 + li r10,IMASK + blt- 1f + + /* Test the soft mask state against our interrupt's bit */ lbz r10,PACAIRQSOFTMASK(r13) - andi. r10,r10,IMASK +1: andi. r10,r10,IMASK /* Associate vector numbers with bits in paca->irq_happened */ .if IVEC == 0x500 || IVEC == 0xea0 li r10,PACA_IRQ_EE @@ -540,7 +556,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real) .if ISTACK andi. r10,r12,MSR_PR /* See if coming from user */ - mr r10,r1 /* Save r1 */ +2: mr r10,r1 /* Save r1 */ subi r1,r1,INT_FRAME_SIZE /* alloc frame on kernel stack */ beq- 100f ld r1,PACAKSAVE(r13) /* kernel stack to use */ @@ -2838,7 +2854,8 @@ masked_interrupt: ld r10,PACA_EXGEN+EX_R10(r13) ld r11,PACA_EXGEN+EX_R11(r13) ld r12,PACA_EXGEN+EX_R12(r13) - /* returns to kernel where r13 must be set up, so don't restore it */ + ld r13,PACA_EXGEN+EX_R13(r13) + /* May return to masked low address where r13 is not set up */ .if \hsrr HRFI_TO_KERNEL .else @@ -2997,6 +3014,10 @@ EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline) USE_FIXED_SECTION(virt_trampolines) /* + * All code below __end_interrupts is treated as soft-masked. If + * any code runs here with MSR[EE]=1, it must then cope with pending + * soft interrupt being raised (i.e., by ensuring it is replayed). + * * The __end_interrupts marker must be past the out-of-line (OOL) * handlers, so that they are copied to real address 0x100 when running * a relocatable kernel. This ensures they can be reached from the short -- cgit v1.2.3 From 7fa95f9adaee7e5cbb195d3359741120829e488b Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 11 Jun 2020 18:12:03 +1000 Subject: powerpc/64s: system call support for scv/rfscv instructions Add support for the scv instruction on POWER9 and later CPUs. For now this implements the zeroth scv vector 'scv 0', as identical to 'sc' system calls, with the exception that LR is not preserved, nor are volatile CR registers, and error is not indicated with CR0[SO], but by returning a negative errno. rfscv is implemented to return from scv type system calls. It can not be used to return from sc system calls because those are defined to preserve LR. getpid syscall throughput on POWER9 is improved by 26% (428 to 318 cycles), largely due to reducing mtmsr and mtspr. 
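For reference, a userspace caller of the new instruction might look like the sketch below.
This is illustrative only and not how libc will necessarily wrap it: the caller must first
check for PPC_FEATURE2_SCV in AT_HWCAP2 (e.g. via getauxval()), and failure is reported by
a return value in the -4095..-1 range rather than by cr0.SO. The .long 0x44000001 is the
scv 0 encoding, used so the sketch does not depend on assembler support for the mnemonic.

  #include <sys/auxv.h>

  /* One-argument system call via scv 0 (sketch only). */
  static long scv0_syscall1(long nr, long arg1)
  {
          register long r0 asm("r0") = nr;
          register long r3 asm("r3") = arg1;

          asm volatile(".long 0x44000001"         /* scv 0 */
                       : "+r" (r3)
                       : "r" (r0)
                       : "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11",
                         "r12", "lr", "ctr", "cr0", "cr1", "cr5", "cr6",
                         "cr7", "memory");

          return r3;      /* a value in -4095..-1 means failure, i.e. -errno */
  }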
Signed-off-by: Nicholas Piggin [mpe: Fix ppc64e build] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200611081203.995112-3-npiggin@gmail.com --- Documentation/powerpc/syscall64-abi.rst | 42 +++++--- arch/powerpc/include/asm/asm-prototypes.h | 2 +- arch/powerpc/include/asm/exception-64s.h | 6 ++ arch/powerpc/include/asm/head-64.h | 2 +- arch/powerpc/include/asm/ppc_asm.h | 2 + arch/powerpc/include/asm/ptrace.h | 7 +- arch/powerpc/include/asm/setup.h | 4 +- arch/powerpc/include/asm/sstep.h | 1 + arch/powerpc/kernel/cpu_setup_power.S | 10 +- arch/powerpc/kernel/cputable.c | 3 +- arch/powerpc/kernel/dt_cpu_ftrs.c | 1 + arch/powerpc/kernel/entry_64.S | 171 +++++++++++++++++++++++++++++- arch/powerpc/kernel/exceptions-64s.S | 123 ++++++++++++++++++++- arch/powerpc/kernel/process.c | 10 +- arch/powerpc/kernel/setup_64.c | 5 +- arch/powerpc/kernel/signal.c | 19 +++- arch/powerpc/kernel/syscall_64.c | 32 ++++-- arch/powerpc/lib/sstep.c | 16 +++ arch/powerpc/platforms/pseries/setup.c | 8 +- arch/powerpc/xmon/xmon.c | 1 + 20 files changed, 421 insertions(+), 44 deletions(-) diff --git a/Documentation/powerpc/syscall64-abi.rst b/Documentation/powerpc/syscall64-abi.rst index e49f69f941b9..46caaadbb029 100644 --- a/Documentation/powerpc/syscall64-abi.rst +++ b/Documentation/powerpc/syscall64-abi.rst @@ -5,6 +5,15 @@ Power Architecture 64-bit Linux system call ABI syscall ======= +Invocation +---------- +The syscall is made with the sc instruction, and returns with execution +continuing at the instruction following the sc instruction. + +If PPC_FEATURE2_SCV appears in the AT_HWCAP2 ELF auxiliary vector, the +scv 0 instruction is an alternative that may provide better performance, +with some differences to calling sequence. + syscall calling sequence\ [1]_ matches the Power Architecture 64-bit ELF ABI specification C function calling sequence, including register preservation rules, with the following differences. @@ -12,16 +21,23 @@ rules, with the following differences. .. [1] Some syscalls (typically low-level management functions) may have different calling sequences (e.g., rt_sigreturn). -Parameters and return value ---------------------------- +Parameters +---------- The system call number is specified in r0. There is a maximum of 6 integer parameters to a syscall, passed in r3-r8. -Both a return value and a return error code are returned. cr0.SO is the return -error code, and r3 is the return value or error code. When cr0.SO is clear, -the syscall succeeded and r3 is the return value. When cr0.SO is set, the -syscall failed and r3 is the error code that generally corresponds to errno. +Return value +------------ +- For the sc instruction, both a value and an error condition are returned. + cr0.SO is the error condition, and r3 is the return value. When cr0.SO is + clear, the syscall succeeded and r3 is the return value. When cr0.SO is set, + the syscall failed and r3 is the error value (that normally corresponds to + errno). + +- For the scv 0 instruction, the return value indicates failure if it is + -4095..-1 (i.e., it is >= -MAX_ERRNO (-4095) as an unsigned comparison), + in which case the error value is the negated return value. Stack ----- @@ -34,22 +50,23 @@ Register preservation rules match the ELF ABI calling sequence with the following differences: =========== ============= ======================================== +--- For the sc instruction, differences with the ELF ABI --- r0 Volatile (System call number.) r3 Volatile (Parameter 1, and return value.) 
r4-r8 Volatile (Parameters 2-6.) -cr0 Volatile (cr0.SO is the return error condition) +cr0 Volatile (cr0.SO is the return error condition.) cr1, cr5-7 Nonvolatile lr Nonvolatile + +--- For the scv 0 instruction, differences with the ELF ABI --- +r0 Volatile (System call number.) +r3 Volatile (Parameter 1, and return value.) +r4-r8 Volatile (Parameters 2-6.) =========== ============= ======================================== All floating point and vector data registers as well as control and status registers are nonvolatile. -Invocation ----------- -The syscall is performed with the sc instruction, and returns with execution -continuing at the instruction following the sc instruction. - Transactional Memory -------------------- Syscall behavior can change if the processor is in transactional or suspended @@ -75,6 +92,7 @@ auxiliary vector. returning to the caller. This case is not well defined or supported, so this behavior should not be relied upon. +scv 0 syscalls will always behave as PPC_FEATURE2_HTM_NOSC. vsyscall ======== diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 7d81e86a1e5d..fb47bf5818c8 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -98,7 +98,7 @@ unsigned long __init early_init(unsigned long dt_ptr); void __init machine_init(u64 dt_ptr); #endif long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8, unsigned long r0, struct pt_regs *regs); -notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs); +notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv); notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr); notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr); diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 47bd4ea0837d..0c2fe7f042d1 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -123,6 +123,12 @@ hrfid; \ b hrfi_flush_fallback +#define RFSCV_TO_USER \ + STF_EXIT_BARRIER_SLOT; \ + RFI_FLUSH_SLOT; \ + RFSCV; \ + b rfscv_flush_fallback + #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_EXCEPTION_H */ diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h index 2dabcf668292..4cb9efa2eb21 100644 --- a/arch/powerpc/include/asm/head-64.h +++ b/arch/powerpc/include/asm/head-64.h @@ -128,7 +128,7 @@ name: .if ((start) % (size) != 0); \ .error "Fixed section exception vector misalignment"; \ .endif; \ - .if ((size) != 0x20) && ((size) != 0x80) && ((size) != 0x100); \ + .if ((size) != 0x20) && ((size) != 0x80) && ((size) != 0x100) && ((size) != 0x1000); \ .error "Fixed section exception vector bad size"; \ .endif; \ .if (start) < sname##_start; \ diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 6b03dff61a05..160f3bb77ea4 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -755,6 +755,8 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) #define N_SLINE 68 #define N_SO 100 +#define RFSCV .long 0x4c0000a4 + /* * Create an endian fixup trampoline * diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index ac3970fff0d5..f194339cef3b 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -222,9 +222,14 @@ 
static inline void set_trap(struct pt_regs *regs, unsigned long val) regs->trap = (regs->trap & TRAP_FLAGS_MASK) | (val & ~TRAP_FLAGS_MASK); } +static inline bool trap_is_scv(struct pt_regs *regs) +{ + return (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && TRAP(regs) == 0x3000); +} + static inline bool trap_is_syscall(struct pt_regs *regs) { - return TRAP(regs) == 0xc00; + return (trap_is_scv(regs) || TRAP(regs) == 0xc00); } static inline bool trap_norestart(struct pt_regs *regs) diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 65676e2325b8..9efbddee2bca 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -30,12 +30,12 @@ void setup_panic(void); #define ARCH_PANIC_TIMEOUT 180 #ifdef CONFIG_PPC_PSERIES -extern void pseries_enable_reloc_on_exc(void); +extern bool pseries_enable_reloc_on_exc(void); extern void pseries_disable_reloc_on_exc(void); extern void pseries_big_endian_exceptions(void); extern void pseries_little_endian_exceptions(void); #else -static inline void pseries_enable_reloc_on_exc(void) {} +static inline bool pseries_enable_reloc_on_exc(void) { return false; } static inline void pseries_disable_reloc_on_exc(void) {} static inline void pseries_big_endian_exceptions(void) {} static inline void pseries_little_endian_exceptions(void) {} diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h index 3b01c69a44aa..eaa4fb6c8960 100644 --- a/arch/powerpc/include/asm/sstep.h +++ b/arch/powerpc/include/asm/sstep.h @@ -40,6 +40,7 @@ enum instruction_type { CACHEOP, BARRIER, SYSCALL, + SYSCALL_VECTORED_0, MFMSR, MTMSR, RFI, diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index efdcfa714106..86527d19348c 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -98,7 +98,7 @@ _GLOBAL(__setup_cpu_power10) _GLOBAL(__setup_cpu_power9) mflr r11 - bl __init_FSCR + bl __init_FSCR_power9 1: bl __init_PMU bl __init_hvmode_206 mtlr r11 @@ -128,7 +128,7 @@ _GLOBAL(__restore_cpu_power10) _GLOBAL(__restore_cpu_power9) mflr r11 - bl __init_FSCR + bl __init_FSCR_power9 1: bl __init_PMU mfmsr r3 rldicl. 
r0,r3,4,63 @@ -198,6 +198,12 @@ __init_FSCR_power10: mtspr SPRN_FSCR, r3 // fall through +__init_FSCR_power9: + mfspr r3, SPRN_FSCR + ori r3, r3, FSCR_SCV + mtspr SPRN_FSCR, r3 + // fall through + __init_FSCR: mfspr r3,SPRN_FSCR ori r3,r3,FSCR_TAR|FSCR_EBB diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index b4066354f073..3d406a9626e8 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -120,7 +120,8 @@ extern void __restore_cpu_e6500(void); #define COMMON_USER2_POWER9 (COMMON_USER2_POWER8 | \ PPC_FEATURE2_ARCH_3_00 | \ PPC_FEATURE2_HAS_IEEE128 | \ - PPC_FEATURE2_DARN ) + PPC_FEATURE2_DARN | \ + PPC_FEATURE2_SCV) #define COMMON_USER_POWER10 COMMON_USER_POWER9 #define COMMON_USER2_POWER10 (COMMON_USER2_POWER9 | \ PPC_FEATURE2_ARCH_3_1 | \ diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 3a409517c031..50b2d544361e 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -587,6 +587,7 @@ static struct dt_cpu_feature_match __initdata {"little-endian", feat_enable_le, CPU_FTR_REAL_LE}, {"smt", feat_enable_smt, 0}, {"interrupt-facilities", feat_enable, 0}, + {"system-call-vectored", feat_enable, 0}, {"timer-facilities", feat_enable, 0}, {"timer-facilities-v3", feat_enable, 0}, {"debug-facilities", feat_enable, 0}, diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 9d49338e0c85..223c4f008e63 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -64,15 +64,173 @@ exception_marker: .section ".text" .align 7 +#ifdef CONFIG_PPC_BOOK3S +.macro system_call_vectored name trapnr + .globl system_call_vectored_\name +system_call_vectored_\name: +_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name) +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +BEGIN_FTR_SECTION + extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */ + bne .Ltabort_syscall +END_FTR_SECTION_IFSET(CPU_FTR_TM) +#endif + INTERRUPT_TO_KERNEL + mr r10,r1 + ld r1,PACAKSAVE(r13) + std r10,0(r1) + std r11,_NIP(r1) + std r12,_MSR(r1) + std r0,GPR0(r1) + std r10,GPR1(r1) + std r2,GPR2(r1) + ld r2,PACATOC(r13) + mfcr r12 + li r11,0 + /* Can we avoid saving r3-r8 in common case? */ + std r3,GPR3(r1) + std r4,GPR4(r1) + std r5,GPR5(r1) + std r6,GPR6(r1) + std r7,GPR7(r1) + std r8,GPR8(r1) + /* Zero r9-r12, this should only be required when restoring all GPRs */ + std r11,GPR9(r1) + std r11,GPR10(r1) + std r11,GPR11(r1) + std r11,GPR12(r1) + std r9,GPR13(r1) + SAVE_NVGPRS(r1) + std r11,_XER(r1) + std r11,_LINK(r1) + std r11,_CTR(r1) + + li r11,\trapnr + std r11,_TRAP(r1) + std r12,_CCR(r1) + std r3,ORIG_GPR3(r1) + addi r10,r1,STACK_FRAME_OVERHEAD + ld r11,exception_marker@toc(r2) + std r11,-16(r10) /* "regshere" marker */ + + /* + * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which + * would clobber syscall parameters. Also we always enter with IRQs + * enabled and nothing pending. system_call_exception() will call + * trace_hardirqs_off(). + * + * scv enters with MSR[EE]=1, so don't set PACA_IRQ_HARD_DIS. The + * entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED. + */ + + /* Calling convention has r9 = orig r0, r10 = regs */ + mr r9,r0 + bl system_call_exception + +.Lsyscall_vectored_\name\()_exit: + addi r4,r1,STACK_FRAME_OVERHEAD + li r5,1 /* scv */ + bl syscall_exit_prepare + + ld r2,_CCR(r1) + ld r4,_NIP(r1) + ld r5,_MSR(r1) + +BEGIN_FTR_SECTION + stdcx. 
r0,0,r1 /* to clear the reservation */ +END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) + +BEGIN_FTR_SECTION + HMT_MEDIUM_LOW +END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) + + cmpdi r3,0 + bne .Lsyscall_vectored_\name\()_restore_regs + + /* rfscv returns with LR->NIA and CTR->MSR */ + mtlr r4 + mtctr r5 + + /* Could zero these as per ABI, but we may consider a stricter ABI + * which preserves these if libc implementations can benefit, so + * restore them for now until further measurement is done. */ + ld r0,GPR0(r1) + ld r4,GPR4(r1) + ld r5,GPR5(r1) + ld r6,GPR6(r1) + ld r7,GPR7(r1) + ld r8,GPR8(r1) + /* Zero volatile regs that may contain sensitive kernel data */ + li r9,0 + li r10,0 + li r11,0 + li r12,0 + mtspr SPRN_XER,r0 + + /* + * We don't need to restore AMR on the way back to userspace for KUAP. + * The value of AMR only matters while we're in the kernel. + */ + mtcr r2 + ld r2,GPR2(r1) + ld r3,GPR3(r1) + ld r13,GPR13(r1) + ld r1,GPR1(r1) + RFSCV_TO_USER + b . /* prevent speculative execution */ + +.Lsyscall_vectored_\name\()_restore_regs: + li r3,0 + mtmsrd r3,1 + mtspr SPRN_SRR0,r4 + mtspr SPRN_SRR1,r5 + + ld r3,_CTR(r1) + ld r4,_LINK(r1) + ld r5,_XER(r1) + + REST_NVGPRS(r1) + ld r0,GPR0(r1) + mtcr r2 + mtctr r3 + mtlr r4 + mtspr SPRN_XER,r5 + REST_10GPRS(2, r1) + REST_2GPRS(12, r1) + ld r1,GPR1(r1) + RFI_TO_USER +.endm + +system_call_vectored common 0x3000 +/* + * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0 + * which is tested by system_call_exception when r0 is -1 (as set by vector + * entry code). + */ +system_call_vectored sigill 0x7ff0 + + +/* + * Entered via kernel return set up by kernel/sstep.c, must match entry regs + */ + .globl system_call_vectored_emulate +system_call_vectored_emulate: +_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate) + li r10,IRQS_ALL_DISABLED + stb r10,PACAIRQSOFTMASK(r13) + b system_call_vectored_common +#endif + + .balign IFETCH_ALIGN_BYTES .globl system_call_common system_call_common: +_ASM_NOKPROBE_SYMBOL(system_call_common) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM BEGIN_FTR_SECTION extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */ bne .Ltabort_syscall END_FTR_SECTION_IFSET(CPU_FTR_TM) #endif -_ASM_NOKPROBE_SYMBOL(system_call_common) mr r10,r1 ld r1,PACAKSAVE(r13) std r10,0(r1) @@ -138,6 +296,7 @@ END_BTB_FLUSH_SECTION .Lsyscall_exit: addi r4,r1,STACK_FRAME_OVERHEAD + li r5,0 /* !scv */ bl syscall_exit_prepare ld r2,_CCR(r1) @@ -224,10 +383,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) b . /* prevent speculative execution */ #endif +#ifdef CONFIG_PPC_BOOK3S +_GLOBAL(ret_from_fork_scv) + bl schedule_tail + REST_NVGPRS(r1) + li r3,0 /* fork() return value */ + b .Lsyscall_vectored_common_exit +#endif + _GLOBAL(ret_from_fork) bl schedule_tail REST_NVGPRS(r1) - li r3,0 + li r3,0 /* fork() return value */ b .Lsyscall_exit _GLOBAL(ret_from_kernel_thread) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index f15307c50bc1..8afc0e03d7d4 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -756,6 +756,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) * guarantee they will be delivered virtually. Some conditions (see the ISA) * cause exceptions to be delivered in real mode. * + * The scv instructions are a special case. They get a 0x3000 offset applied. + * scv exceptions have unique reentrancy properties, see below. + * * It's impossible to receive interrupts below 0x300 via AIL. * * KVM: None of the virtual exceptions are from the guest. 
Anything that @@ -765,8 +768,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) * We layout physical memory as follows: * 0x0000 - 0x00ff : Secondary processor spin code * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors - * 0x1900 - 0x3fff : Real mode trampolines - * 0x4000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors + * 0x1900 - 0x2fff : Real mode trampolines + * 0x3000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors * 0x5900 - 0x6fff : Relon mode trampolines * 0x7000 - 0x7fff : FWNMI data area * 0x8000 - .... : Common interrupt handlers, remaining early @@ -777,8 +780,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) * vectors there. */ OPEN_FIXED_SECTION(real_vectors, 0x0100, 0x1900) -OPEN_FIXED_SECTION(real_trampolines, 0x1900, 0x4000) -OPEN_FIXED_SECTION(virt_vectors, 0x4000, 0x5900) +OPEN_FIXED_SECTION(real_trampolines, 0x1900, 0x3000) +OPEN_FIXED_SECTION(virt_vectors, 0x3000, 0x5900) OPEN_FIXED_SECTION(virt_trampolines, 0x5900, 0x7000) #ifdef CONFIG_PPC_POWERNV @@ -814,6 +817,77 @@ USE_FIXED_SECTION(real_vectors) .globl __start_interrupts __start_interrupts: +/** + * Interrupt 0x3000 - System Call Vectored Interrupt (syscall). + * This is a synchronous interrupt invoked with the "scv" instruction. The + * system call does not alter the HV bit, so it is directed to the OS. + * + * Handling: + * scv instructions enter the kernel without changing EE, RI, ME, or HV. + * In particular, this means we can take a maskable interrupt at any point + * in the scv handler, which is unlike any other interrupt. This is solved + * by treating the instruction addresses below __end_interrupts as being + * soft-masked. + * + * AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and + * ensure scv is never executed with relocation off, which means AIL-0 + * should never happen. 
+ * + * Before leaving the below __end_interrupts text, at least of the following + * must be true: + * - MSR[PR]=1 (i.e., return to userspace) + * - MSR_EE|MSR_RI is set (no reentrant exceptions) + * - Standard kernel environment is set up (stack, paca, etc) + * + * Call convention: + * + * syscall register convention is in Documentation/powerpc/syscall64-abi.rst + */ +EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000) + /* SCV 0 */ + mr r9,r13 + GET_PACA(r13) + mflr r11 + mfctr r12 + li r10,IRQS_ALL_DISABLED + stb r10,PACAIRQSOFTMASK(r13) +#ifdef CONFIG_RELOCATABLE + b system_call_vectored_tramp +#else + b system_call_vectored_common +#endif + nop + + /* SCV 1 - 127 */ + .rept 127 + mr r9,r13 + GET_PACA(r13) + mflr r11 + mfctr r12 + li r10,IRQS_ALL_DISABLED + stb r10,PACAIRQSOFTMASK(r13) + li r0,-1 /* cause failure */ +#ifdef CONFIG_RELOCATABLE + b system_call_vectored_sigill_tramp +#else + b system_call_vectored_sigill +#endif + .endr +EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000) + +#ifdef CONFIG_RELOCATABLE +TRAMP_VIRT_BEGIN(system_call_vectored_tramp) + __LOAD_HANDLER(r10, system_call_vectored_common) + mtctr r10 + bctr + +TRAMP_VIRT_BEGIN(system_call_vectored_sigill_tramp) + __LOAD_HANDLER(r10, system_call_vectored_sigill) + mtctr r10 + bctr +#endif + + /* No virt vectors corresponding with 0x0..0x100 */ EXC_VIRT_NONE(0x4000, 0x100) @@ -2963,6 +3037,47 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback) GET_SCRATCH0(r13); hrfid +TRAMP_REAL_BEGIN(rfscv_flush_fallback) + /* system call volatile */ + mr r7,r13 + GET_PACA(r13); + mr r8,r1 + ld r1,PACAKSAVE(r13) + mfctr r9 + ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) + ld r11,PACA_L1D_FLUSH_SIZE(r13) + srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ + mtctr r11 + DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ + + /* order ld/st prior to dcbt stop all streams with flushing */ + sync + + /* + * The load adresses are at staggered offsets within cachelines, + * which suits some pipelines better (on others it should not + * hurt). 
+ */ +1: + ld r11,(0x80 + 8)*0(r10) + ld r11,(0x80 + 8)*1(r10) + ld r11,(0x80 + 8)*2(r10) + ld r11,(0x80 + 8)*3(r10) + ld r11,(0x80 + 8)*4(r10) + ld r11,(0x80 + 8)*5(r10) + ld r11,(0x80 + 8)*6(r10) + ld r11,(0x80 + 8)*7(r10) + addi r10,r10,0x80*8 + bdnz 1b + + mtctr r9 + li r9,0 + li r10,0 + li r11,0 + mr r1,r8 + mr r13,r7 + RFSCV + USE_TEXT_SECTION() MASKED_INTERRUPT MASKED_INTERRUPT hsrr=1 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 4650b9bb217f..939935613cf1 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1599,6 +1599,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp, { struct pt_regs *childregs, *kregs; extern void ret_from_fork(void); + extern void ret_from_fork_scv(void); extern void ret_from_kernel_thread(void); void (*f)(void); unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE; @@ -1635,7 +1636,9 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp, if (usp) childregs->gpr[1] = usp; p->thread.regs = childregs; - childregs->gpr[3] = 0; /* Result from fork() */ + /* 64s sets this in ret_from_fork */ + if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64)) + childregs->gpr[3] = 0; /* Result from fork() */ if (clone_flags & CLONE_SETTLS) { if (!is_32bit_task()) childregs->gpr[13] = tls; @@ -1643,7 +1646,10 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp, childregs->gpr[2] = tls; } - f = ret_from_fork; + if (trap_is_scv(regs)) + f = ret_from_fork_scv; + else + f = ret_from_fork; } childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX); sp -= STACK_FRAME_OVERHEAD; diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 0ba1ed77dc68..6be430107c6f 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -196,7 +196,10 @@ static void __init configure_exceptions(void) /* Under a PAPR hypervisor, we need hypercalls */ if (firmware_has_feature(FW_FEATURE_SET_MODE)) { /* Enable AIL if possible */ - pseries_enable_reloc_on_exc(); + if (!pseries_enable_reloc_on_exc()) { + init_task.thread.fscr &= ~FSCR_SCV; + cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_SCV; + } /* * Tell the hypervisor that we want our exceptions to diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index b4143b6ff093..d15a98c758b8 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -205,8 +205,14 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka, return; /* error signalled ? */ - if (!(regs->ccr & 0x10000000)) + if (trap_is_scv(regs)) { + /* 32-bit compat mode sign extend? 
*/ + if (!IS_ERR_VALUE(ret)) + return; + ret = -ret; + } else if (!(regs->ccr & 0x10000000)) { return; + } switch (ret) { case ERESTART_RESTARTBLOCK: @@ -239,9 +245,14 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka, regs->nip -= 4; regs->result = 0; } else { - regs->result = -EINTR; - regs->gpr[3] = EINTR; - regs->ccr |= 0x10000000; + if (trap_is_scv(regs)) { + regs->result = -EINTR; + regs->gpr[3] = -EINTR; + } else { + regs->result = -EINTR; + regs->gpr[3] = EINTR; + regs->ccr |= 0x10000000; + } } } diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c index 79edba3ab312..a783fd324cb0 100644 --- a/arch/powerpc/kernel/syscall_64.c +++ b/arch/powerpc/kernel/syscall_64.c @@ -60,6 +60,11 @@ notrace long system_call_exception(long r3, long r4, long r5, local_irq_enable(); if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) { + if (unlikely(regs->trap == 0x7ff0)) { + /* Unsupported scv vector */ + _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); + return regs->gpr[3]; + } /* * We use the return value of do_syscall_trace_enter() as the * syscall number. If the syscall was rejected for any reason @@ -78,6 +83,11 @@ notrace long system_call_exception(long r3, long r4, long r5, r8 = regs->gpr[8]; } else if (unlikely(r0 >= NR_syscalls)) { + if (unlikely(regs->trap == 0x7ff0)) { + /* Unsupported scv vector */ + _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); + return regs->gpr[3]; + } return -ENOSYS; } @@ -105,16 +115,20 @@ notrace long system_call_exception(long r3, long r4, long r5, * local irqs must be disabled. Returns false if the caller must re-enable * them, check for new work, and try again. */ -static notrace inline bool prep_irq_for_enabled_exit(void) +static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri) { /* This must be done with RI=1 because tracing may touch vmaps */ trace_hardirqs_on(); /* This pattern matches prep_irq_for_idle */ - __hard_EE_RI_disable(); + if (clear_ri) + __hard_EE_RI_disable(); + else + __hard_irq_disable(); if (unlikely(lazy_irq_pending_nocheck())) { /* Took an interrupt, may have more exit work to do. */ - __hard_RI_enable(); + if (clear_ri) + __hard_RI_enable(); trace_hardirqs_off(); local_paca->irq_happened |= PACA_IRQ_HARD_DIS; @@ -136,7 +150,8 @@ static notrace inline bool prep_irq_for_enabled_exit(void) * because RI=0 and soft mask state is "unreconciled", so it is marked notrace. 
*/ notrace unsigned long syscall_exit_prepare(unsigned long r3, - struct pt_regs *regs) + struct pt_regs *regs, + long scv) { unsigned long *ti_flagsp = ¤t_thread_info()->flags; unsigned long ti_flags; @@ -151,7 +166,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3, ti_flags = *ti_flagsp; - if (unlikely(r3 >= (unsigned long)-MAX_ERRNO)) { + if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && !scv) { if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) { r3 = -r3; regs->ccr |= 0x10000000; /* Set SO bit in CR */ @@ -211,7 +226,8 @@ again: } } - if (unlikely(!prep_irq_for_enabled_exit())) { + /* scv need not set RI=0 because SRRs are not used */ + if (unlikely(!prep_irq_for_enabled_exit(!scv))) { local_irq_enable(); goto again; } @@ -282,7 +298,7 @@ again: } } - if (unlikely(!prep_irq_for_enabled_exit())) { + if (unlikely(!prep_irq_for_enabled_exit(true))) { local_irq_enable(); local_irq_disable(); goto again; @@ -345,7 +361,7 @@ again: } } - if (unlikely(!prep_irq_for_enabled_exit())) { + if (unlikely(!prep_irq_for_enabled_exit(true))) { /* * Can't local_irq_restore to replay if we were in * interrupt context. Must replay directly. diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 5abe98216dc2..ab6a137c3862 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -16,6 +16,7 @@ #include extern char system_call_common[]; +extern char system_call_vectored_emulate[]; #ifdef CONFIG_PPC64 /* Bits in SRR1 that are copied from MSR */ @@ -1236,6 +1237,9 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 17: /* sc */ if ((word & 0xfe2) == 2) op->type = SYSCALL; + else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && + (word & 0xfe3) == 1) + op->type = SYSCALL_VECTORED_0; else op->type = UNKNOWN; return 0; @@ -3378,6 +3382,18 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr) regs->msr = MSR_KERNEL; return 1; +#ifdef CONFIG_PPC64_BOOK3S + case SYSCALL_VECTORED_0: /* scv 0 */ + regs->gpr[9] = regs->gpr[13]; + regs->gpr[10] = MSR_KERNEL; + regs->gpr[11] = regs->nip + 4; + regs->gpr[12] = regs->msr & MSR_MASK; + regs->gpr[13] = (unsigned long) get_paca(); + regs->nip = (unsigned long) &system_call_vectored_emulate; + regs->msr = MSR_KERNEL; + return 1; +#endif + case RFI: return -1; #endif diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 2db8469e475f..8c85466e0dd8 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -358,7 +358,7 @@ static void pseries_lpar_idle(void) * to ever be a problem in practice we can move this into a kernel thread to * finish off the process later in boot. 
*/ -void pseries_enable_reloc_on_exc(void) +bool pseries_enable_reloc_on_exc(void) { long rc; unsigned int delay, total_delay = 0; @@ -369,11 +369,13 @@ void pseries_enable_reloc_on_exc(void) if (rc == H_P2) { pr_info("Relocation on exceptions not" " supported\n"); + return false; } else if (rc != H_SUCCESS) { pr_warn("Unable to enable relocation" " on exceptions: %ld\n", rc); + return false; } - break; + return true; } delay = get_longbusy_msecs(rc); @@ -382,7 +384,7 @@ void pseries_enable_reloc_on_exc(void) pr_warn("Warning: Giving up waiting to enable " "relocation on exceptions (%u msec)!\n", total_delay); - return; + return false; } mdelay(delay); diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 7efe4bc3ccf6..3203c3606737 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -1593,6 +1593,7 @@ const char *getvecname(unsigned long vec) case 0x1300: ret = "(Instruction Breakpoint)"; break; case 0x1500: ret = "(Denormalisation)"; break; case 0x1700: ret = "(Altivec Assist)"; break; + case 0x3000: ret = "(System Call Vectored)"; break; default: ret = ""; } return ret; -- cgit v1.2.3 From b6b54b42722a2393056c891c0d05cd8cc40eb776 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Mon, 25 May 2020 12:59:19 +1000 Subject: powerpc/sstep: Add tests for prefixed integer load/stores Add tests for the prefixed versions of the integer load/stores that are currently tested. This includes the following instructions: * Prefixed Load Doubleword (pld) * Prefixed Load Word and Zero (plwz) * Prefixed Store Doubleword (pstd) Skip the new tests if ISA v3.1 is unsupported. Signed-off-by: Jordan Niethe [mpe: Fix conflicts with ppc-opcode.h changes] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200525025923.19843-1-jniethe5@gmail.com --- arch/powerpc/include/asm/ppc-opcode.h | 9 ++++ arch/powerpc/lib/test_emulate_step.c | 85 +++++++++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 6b7a3f0f2d62..90ae33a74d78 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -265,6 +265,14 @@ #define PPC_INST_BRANCH 0x48000000 #define PPC_INST_BRANCH_COND 0x40800000 +/* Prefixes */ +#define PPC_PREFIX_MLS 0x06000000 +#define PPC_PREFIX_8LS 0x04000000 + +/* Prefixed instructions */ +#define PPC_INST_PLD 0xe4000000 +#define PPC_INST_PSTD 0xf4000000 + /* macros to insert fields into opcodes */ #define ___PPC_RA(a) (((a) & 0x1f) << 16) #define ___PPC_RB(b) (((b) & 0x1f) << 11) @@ -296,6 +304,7 @@ #define __PPC_CT(t) (((t) & 0x0f) << 21) #define __PPC_SPR(r) ((((r) & 0x1f) << 16) | ((((r) >> 5) & 0x1f) << 11)) #define __PPC_RC21 (0x1 << 10) +#define __PPC_PRFX_R(r) (((r) & 0x1) << 20) /* * Both low and high 16 bits are added as SIGNED additions, so if low 16 bits diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index 988c734e7370..34f1ea0e6742 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -19,6 +19,18 @@ #define IGNORE_XER (0x1UL << 32) #define IGNORE_CCR (0x1UL << 33) +#define TEST_PLD(r, base, i, pr) \ + ppc_inst_prefix(PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_H(i), \ + PPC_INST_PLD | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) + +#define TEST_PLWZ(r, base, i, pr) \ + ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \ + PPC_RAW_LWZ(r, base, i)) + +#define TEST_PSTD(r, base, i, pr) \ + ppc_inst_prefix(PPC_PREFIX_8LS | 
__PPC_PRFX_R(pr) | IMM_H(i), \ + PPC_INST_PSTD | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) + static void __init init_pt_regs(struct pt_regs *regs) { static unsigned long msr; @@ -70,6 +82,29 @@ static void __init test_ld(void) show_result("ld", "FAIL"); } +static void __init test_pld(void) +{ + struct pt_regs regs; + unsigned long a = 0x23; + int stepped = -1; + + if (!cpu_has_feature(CPU_FTR_ARCH_31)) { + show_result("pld", "SKIP (!CPU_FTR_ARCH_31)"); + return; + } + + init_pt_regs(®s); + regs.gpr[3] = (unsigned long)&a; + + /* pld r5, 0(r3), 0 */ + stepped = emulate_step(®s, TEST_PLD(5, 3, 0, 0)); + + if (stepped == 1 && regs.gpr[5] == a) + show_result("pld", "PASS"); + else + show_result("pld", "FAIL"); +} + static void __init test_lwz(void) { struct pt_regs regs; @@ -88,6 +123,30 @@ static void __init test_lwz(void) show_result("lwz", "FAIL"); } +static void __init test_plwz(void) +{ + struct pt_regs regs; + unsigned int a = 0x4545; + int stepped = -1; + + if (!cpu_has_feature(CPU_FTR_ARCH_31)) { + show_result("plwz", "SKIP (!CPU_FTR_ARCH_31)"); + return; + } + + init_pt_regs(®s); + regs.gpr[3] = (unsigned long)&a; + + /* plwz r5, 0(r3), 0 */ + + stepped = emulate_step(®s, TEST_PLWZ(5, 3, 0, 0)); + + if (stepped == 1 && regs.gpr[5] == a) + show_result("plwz", "PASS"); + else + show_result("plwz", "FAIL"); +} + static void __init test_lwzx(void) { struct pt_regs regs; @@ -125,6 +184,29 @@ static void __init test_std(void) show_result("std", "FAIL"); } +static void __init test_pstd(void) +{ + struct pt_regs regs; + unsigned long a = 0x1234; + int stepped = -1; + + if (!cpu_has_feature(CPU_FTR_ARCH_31)) { + show_result("pstd", "SKIP (!CPU_FTR_ARCH_31)"); + return; + } + + init_pt_regs(®s); + regs.gpr[3] = (unsigned long)&a; + regs.gpr[5] = 0x5678; + + /* pstd r5, 0(r3), 0 */ + stepped = emulate_step(®s, TEST_PSTD(5, 3, 0, 0)); + if (stepped == 1 || regs.gpr[5] == a) + show_result("pstd", "PASS"); + else + show_result("pstd", "FAIL"); +} + static void __init test_ldarx_stdcx(void) { struct pt_regs regs; @@ -404,9 +486,12 @@ static void __init test_lxvd2x_stxvd2x(void) static void __init run_tests_load_store(void) { test_ld(); + test_pld(); test_lwz(); + test_plwz(); test_lwzx(); test_std(); + test_pstd(); test_ldarx_stdcx(); test_lfsx_stfsx(); test_lfdx_stfdx(); -- cgit v1.2.3 From 0396de6d8561c721b03fce386eb9682b37a26013 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Mon, 25 May 2020 12:59:20 +1000 Subject: powerpc/sstep: Add tests for prefixed floating-point load/stores Add tests for the prefixed versions of the floating-point load/stores that are currently tested. This includes the following instructions: * Prefixed Load Floating-Point Single (plfs) * Prefixed Load Floating-Point Double (plfd) * Prefixed Store Floating-Point Single (pstfs) * Prefixed Store Floating-Point Double (pstfd) Skip the new tests if ISA v3.10 is unsupported. 
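As with the integer variants, each new test builds the two words of the prefixed
instruction with a helper macro and hands them to emulate_step(). An illustrative
fragment, mirroring the tests and the TEST_PLFS() macro added below:

  union { float a; int b; } c = { .a = 123.45 };
  struct pt_regs regs;
  int stepped;

  init_pt_regs(&regs);
  regs.gpr[3] = (unsigned long)&c.a;

  /* plfs f10, 0(r3), 0: an MLS-form prefix word (R=0, si0=0) followed by
   * an ordinary lfs word carrying the low 16 bits of the displacement */
  stepped = emulate_step(&regs, TEST_PLFS(10, 3, 0, 0));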
Signed-off-by: Jordan Niethe [mpe: Fix conflicts with ppc-opcode.h changes] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200525025923.19843-2-jniethe5@gmail.com --- arch/powerpc/include/asm/ppc-opcode.h | 4 ++ arch/powerpc/lib/test_emulate_step.c | 124 ++++++++++++++++++++++++++++++++++ 2 files changed, 128 insertions(+) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 90ae33a74d78..4c0bdafb6a7b 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -266,6 +266,10 @@ #define PPC_INST_BRANCH_COND 0x40800000 /* Prefixes */ +#define PPC_INST_LFS 0xc0000000 +#define PPC_INST_STFS 0xd0000000 +#define PPC_INST_LFD 0xc8000000 +#define PPC_INST_STFD 0xd8000000 #define PPC_PREFIX_MLS 0x06000000 #define PPC_PREFIX_8LS 0x04000000 diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index 34f1ea0e6742..0059290f0fed 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -31,6 +31,22 @@ ppc_inst_prefix(PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_H(i), \ PPC_INST_PSTD | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) +#define TEST_PLFS(r, base, i, pr) \ + ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \ + PPC_INST_LFS | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) + +#define TEST_PSTFS(r, base, i, pr) \ + ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \ + PPC_INST_STFS | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) + +#define TEST_PLFD(r, base, i, pr) \ + ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \ + PPC_INST_LFD | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) + +#define TEST_PSTFD(r, base, i, pr) \ + ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \ + PPC_INST_STFD | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) + static void __init init_pt_regs(struct pt_regs *regs) { static unsigned long msr; @@ -304,6 +320,53 @@ static void __init test_lfsx_stfsx(void) show_result("stfsx", "FAIL"); } +static void __init test_plfs_pstfs(void) +{ + struct pt_regs regs; + union { + float a; + int b; + } c; + int cached_b; + int stepped = -1; + + if (!cpu_has_feature(CPU_FTR_ARCH_31)) { + show_result("pld", "SKIP (!CPU_FTR_ARCH_31)"); + return; + } + + init_pt_regs(®s); + + + /*** plfs ***/ + + c.a = 123.45; + cached_b = c.b; + + regs.gpr[3] = (unsigned long)&c.a; + + /* plfs frt10, 0(r3), 0 */ + stepped = emulate_step(®s, TEST_PLFS(10, 3, 0, 0)); + + if (stepped == 1) + show_result("plfs", "PASS"); + else + show_result("plfs", "FAIL"); + + + /*** pstfs ***/ + + c.a = 678.91; + + /* pstfs frs10, 0(r3), 0 */ + stepped = emulate_step(®s, TEST_PSTFS(10, 3, 0, 0)); + + if (stepped == 1 && c.b == cached_b) + show_result("pstfs", "PASS"); + else + show_result("pstfs", "FAIL"); +} + static void __init test_lfdx_stfdx(void) { struct pt_regs regs; @@ -346,6 +409,53 @@ static void __init test_lfdx_stfdx(void) else show_result("stfdx", "FAIL"); } + +static void __init test_plfd_pstfd(void) +{ + struct pt_regs regs; + union { + double a; + long b; + } c; + long cached_b; + int stepped = -1; + + if (!cpu_has_feature(CPU_FTR_ARCH_31)) { + show_result("pld", "SKIP (!CPU_FTR_ARCH_31)"); + return; + } + + init_pt_regs(®s); + + + /*** plfd ***/ + + c.a = 123456.78; + cached_b = c.b; + + regs.gpr[3] = (unsigned long)&c.a; + + /* plfd frt10, 0(r3), 0 */ + stepped = emulate_step(®s, TEST_PLFD(10, 3, 0, 0)); + + if (stepped == 1) + show_result("plfd", "PASS"); + else + show_result("plfd", "FAIL"); + + + /*** 
pstfd ***/ + + c.a = 987654.32; + + /* pstfd frs10, 0(r3), 0 */ + stepped = emulate_step(®s, TEST_PSTFD(10, 3, 0, 0)); + + if (stepped == 1 && c.b == cached_b) + show_result("pstfd", "PASS"); + else + show_result("pstfd", "FAIL"); +} #else static void __init test_lfsx_stfsx(void) { @@ -353,11 +463,23 @@ static void __init test_lfsx_stfsx(void) show_result("stfsx", "SKIP (CONFIG_PPC_FPU is not set)"); } +static void __init test_plfs_pstfs(void) +{ + show_result("plfs", "SKIP (CONFIG_PPC_FPU is not set)"); + show_result("pstfs", "SKIP (CONFIG_PPC_FPU is not set)"); +} + static void __init test_lfdx_stfdx(void) { show_result("lfdx", "SKIP (CONFIG_PPC_FPU is not set)"); show_result("stfdx", "SKIP (CONFIG_PPC_FPU is not set)"); } + +static void __init test_plfd_pstfd(void) +{ + show_result("plfd", "SKIP (CONFIG_PPC_FPU is not set)"); + show_result("pstfd", "SKIP (CONFIG_PPC_FPU is not set)"); +} #endif /* CONFIG_PPC_FPU */ #ifdef CONFIG_ALTIVEC @@ -494,7 +616,9 @@ static void __init run_tests_load_store(void) test_pstd(); test_ldarx_stdcx(); test_lfsx_stfsx(); + test_plfs_pstfs(); test_lfdx_stfdx(); + test_plfd_pstfd(); test_lvx_stvx(); test_lxvd2x_stxvd2x(); } -- cgit v1.2.3 From 1c89cf7fbed36f078b20fd47d308b4fc6dbff5f6 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Mon, 25 May 2020 12:59:21 +1000 Subject: powerpc/sstep: Set NIP in instruction emulation tests The tests for emulation of compute instructions execute and emulate an instruction and then compare the results to verify the emulation. In ISA v3.1 there are instructions that operate relative to the NIP. Therefore set the NIP in the regs used for the emulated instruction to the location of the executed instruction so they will give the same result. This is a rework of a patch by Balamuruhan S. Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200525025923.19843-3-jniethe5@gmail.com --- arch/powerpc/lib/test_emulate_step.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index 0059290f0fed..857b95ef5713 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -1011,11 +1011,14 @@ static struct compute_test compute_tests[] = { static int __init emulate_compute_instr(struct pt_regs *regs, struct ppc_inst instr) { + extern s32 patch__exec_instr; struct instruction_op op; if (!regs || !ppc_inst_val(instr)) return -EINVAL; + regs->nip = patch_site_addr(&patch__exec_instr); + if (analyse_instr(&op, regs, instr) != 1 || GETTYPE(op.type) != COMPUTE) { pr_info("emulation failed, instruction = 0x%08x\n", ppc_inst_val(instr)); -- cgit v1.2.3 From 301ebf7d69f6709575d137a41a0291f69f343aed Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Mon, 25 May 2020 12:59:22 +1000 Subject: powerpc/sstep: Let compute tests specify a required cpu feature An a array of struct compute_test's are used to declare tests for compute instructions. Add a cpu_feature field to struct compute_test as an optional way to specify a cpu feature that must be present. If not present then skip the test. 
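The next patch in the series uses the new field like this (shown here for illustration):

  {
          .mnemonic    = "paddi",
          .cpu_feature = CPU_FTR_ARCH_31,   /* skip unless ISA v3.1 is present */
          .subtests    = {
                  ...
          }
  },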
Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200525025923.19843-4-jniethe5@gmail.com --- arch/powerpc/lib/test_emulate_step.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index 857b95ef5713..f8cd3ca356c6 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -625,6 +625,7 @@ static void __init run_tests_load_store(void) struct compute_test { char *mnemonic; + unsigned long cpu_feature; struct { char *descr; unsigned long flags; @@ -1068,6 +1069,11 @@ static void __init run_tests_compute(void) for (i = 0; i < ARRAY_SIZE(compute_tests); i++) { test = &compute_tests[i]; + if (test->cpu_feature && !early_cpu_has_feature(test->cpu_feature)) { + show_result(test->mnemonic, "SKIP (!CPU_FTR)"); + continue; + } + for (j = 0; j < MAX_SUBTESTS && test->subtests[j].descr; j++) { instr = test->subtests[j].instr; flags = test->subtests[j].flags; -- cgit v1.2.3 From 4f825900786e1c24e4c48622e12eb493a6cd27b6 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Mon, 25 May 2020 12:59:23 +1000 Subject: powerpc/sstep: Add tests for Prefixed Add Immediate Use the existing support for testing compute type instructions to test Prefixed Add Immediate (paddi). The R bit of the paddi instruction controls whether current instruction address is used. Add test cases for when R=1 and for R=0. paddi has a 34 bit immediate field formed by concatenating si0 and si1. Add tests for the extreme values of this field. Skip the paddi tests if ISA v3.1 is unsupported. Some of these test cases were added by Balamuruhan S. Signed-off-by: Jordan Niethe [mpe: Fix conflicts with ppc-opcode.h changes, squash in .balign] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200525025923.19843-5-jniethe5@gmail.com --- arch/powerpc/lib/test_emulate_step.c | 125 ++++++++++++++++++++++++ arch/powerpc/lib/test_emulate_step_exec_instr.S | 2 + 2 files changed, 127 insertions(+) diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index f8cd3ca356c6..c9a1a343123d 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -47,6 +47,11 @@ ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \ PPC_INST_STFD | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) +#define TEST_PADDI(t, a, i, pr) \ + ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \ + PPC_RAW_ADDI(t, a, i)) + + static void __init init_pt_regs(struct pt_regs *regs) { static unsigned long msr; @@ -634,6 +639,11 @@ struct compute_test { } subtests[MAX_SUBTESTS + 1]; }; +/* Extreme values for si0||si1 (the MLS:D-form 34 bit immediate field) */ +#define SI_MIN BIT(33) +#define SI_MAX (BIT(33) - 1) +#define SI_UMAX (BIT(34) - 1) + static struct compute_test compute_tests[] = { { .mnemonic = "nop", @@ -1006,6 +1016,121 @@ static struct compute_test compute_tests[] = { } } } + }, + { + .mnemonic = "paddi", + .cpu_feature = CPU_FTR_ARCH_31, + .subtests = { + { + .descr = "RA = LONG_MIN, SI = SI_MIN, R = 0", + .instr = TEST_PADDI(21, 22, SI_MIN, 0), + .regs = { + .gpr[21] = 0, + .gpr[22] = LONG_MIN, + } + }, + { + .descr = "RA = LONG_MIN, SI = SI_MAX, R = 0", + .instr = TEST_PADDI(21, 22, SI_MAX, 0), + .regs = { + .gpr[21] = 0, + .gpr[22] = LONG_MIN, + } + }, + { + .descr = "RA = LONG_MAX, SI = SI_MAX, R = 0", + .instr = TEST_PADDI(21, 22, SI_MAX, 0), + .regs = { + .gpr[21] = 0, + .gpr[22] = LONG_MAX, + } + }, + { + .descr 
= "RA = ULONG_MAX, SI = SI_UMAX, R = 0", + .instr = TEST_PADDI(21, 22, SI_UMAX, 0), + .regs = { + .gpr[21] = 0, + .gpr[22] = ULONG_MAX, + } + }, + { + .descr = "RA = ULONG_MAX, SI = 0x1, R = 0", + .instr = TEST_PADDI(21, 22, 0x1, 0), + .regs = { + .gpr[21] = 0, + .gpr[22] = ULONG_MAX, + } + }, + { + .descr = "RA = INT_MIN, SI = SI_MIN, R = 0", + .instr = TEST_PADDI(21, 22, SI_MIN, 0), + .regs = { + .gpr[21] = 0, + .gpr[22] = INT_MIN, + } + }, + { + .descr = "RA = INT_MIN, SI = SI_MAX, R = 0", + .instr = TEST_PADDI(21, 22, SI_MAX, 0), + .regs = { + .gpr[21] = 0, + .gpr[22] = INT_MIN, + } + }, + { + .descr = "RA = INT_MAX, SI = SI_MAX, R = 0", + .instr = TEST_PADDI(21, 22, SI_MAX, 0), + .regs = { + .gpr[21] = 0, + .gpr[22] = INT_MAX, + } + }, + { + .descr = "RA = UINT_MAX, SI = 0x1, R = 0", + .instr = TEST_PADDI(21, 22, 0x1, 0), + .regs = { + .gpr[21] = 0, + .gpr[22] = UINT_MAX, + } + }, + { + .descr = "RA = UINT_MAX, SI = SI_MAX, R = 0", + .instr = TEST_PADDI(21, 22, SI_MAX, 0), + .regs = { + .gpr[21] = 0, + .gpr[22] = UINT_MAX, + } + }, + { + .descr = "RA is r0, SI = SI_MIN, R = 0", + .instr = TEST_PADDI(21, 0, SI_MIN, 0), + .regs = { + .gpr[21] = 0x0, + } + }, + { + .descr = "RA = 0, SI = SI_MIN, R = 0", + .instr = TEST_PADDI(21, 22, SI_MIN, 0), + .regs = { + .gpr[21] = 0x0, + .gpr[22] = 0x0, + } + }, + { + .descr = "RA is r0, SI = 0, R = 1", + .instr = TEST_PADDI(21, 0, 0, 1), + .regs = { + .gpr[21] = 0, + } + }, + { + .descr = "RA is r0, SI = SI_MIN, R = 1", + .instr = TEST_PADDI(21, 0, SI_MIN, 1), + .regs = { + .gpr[21] = 0, + } + } + } } }; diff --git a/arch/powerpc/lib/test_emulate_step_exec_instr.S b/arch/powerpc/lib/test_emulate_step_exec_instr.S index 1580f34f4f4f..9ef941d958d8 100644 --- a/arch/powerpc/lib/test_emulate_step_exec_instr.S +++ b/arch/powerpc/lib/test_emulate_step_exec_instr.S @@ -80,7 +80,9 @@ _GLOBAL(exec_instr) REST_NVGPRS(r31) /* Placeholder for the test instruction */ + .balign 64 1: nop + nop patch_site 1b patch__exec_instr /* -- cgit v1.2.3 From 50428fdc53ba48f6936b10dfdc0d644972403908 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Tue, 2 Jun 2020 15:27:25 +1000 Subject: powerpc: Add a ppc_inst_as_str() helper There are quite a few places where instructions are printed, this is done using a '%x' format specifier. With the introduction of prefixed instructions, this does not work well. Currently in these places, ppc_inst_val() is used for the value for %x so only the first word of prefixed instructions are printed. When the instructions are word instructions, only a single word should be printed. For prefixed instructions both the prefix and suffix should be printed. To accommodate both of these situations, instead of a '%x' specifier use '%s' and introduce a helper, __ppc_inst_as_str() which returns a char *. The char * __ppc_inst_as_str() returns is buffer that is passed to it by the caller. It is cumbersome to require every caller of __ppc_inst_as_str() to now declare a buffer. To make it more convenient to use __ppc_inst_as_str(), wrap it in a macro that uses a compound statement to allocate a buffer on the caller's stack before calling it. 
Signed-off-by: Jordan Niethe Reviewed-by: Joel Stanley Acked-by: Segher Boessenkool [mpe: Drop 0x prefix to match most existings uses, especially xmon] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200602052728.18227-1-jniethe5@gmail.com --- arch/powerpc/include/asm/inst.h | 19 +++++++++++++++++++ arch/powerpc/kernel/kprobes.c | 2 +- arch/powerpc/kernel/trace/ftrace.c | 26 +++++++++++++------------- arch/powerpc/lib/test_emulate_step.c | 4 ++-- arch/powerpc/xmon/xmon.c | 2 +- 5 files changed, 36 insertions(+), 17 deletions(-) diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h index 45f3ec868258..cc73c1267572 100644 --- a/arch/powerpc/include/asm/inst.h +++ b/arch/powerpc/include/asm/inst.h @@ -122,6 +122,25 @@ static inline u64 ppc_inst_as_u64(struct ppc_inst x) #endif } +#define PPC_INST_STR_LEN sizeof("00000000 00000000") + +static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], struct ppc_inst x) +{ + if (ppc_inst_prefixed(x)) + sprintf(str, "%08x %08x", ppc_inst_val(x), ppc_inst_suffix(x)); + else + sprintf(str, "%08x", ppc_inst_val(x)); + + return str; +} + +#define ppc_inst_as_str(x) \ +({ \ + char __str[PPC_INST_STR_LEN]; \ + __ppc_inst_as_str(__str, x); \ + __str; \ +}) + int probe_user_read_inst(struct ppc_inst *inst, struct ppc_inst __user *nip); diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 9cc792a3a6a9..6ab9b4d037c3 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -244,7 +244,7 @@ static int try_to_emulate(struct kprobe *p, struct pt_regs *regs) * So, we should never get here... but, its still * good to catch them, just in case... */ - printk("Can't step on instruction %x\n", ppc_inst_val(insn)); + printk("Can't step on instruction %s\n", ppc_inst_as_str(insn)); BUG(); } else { /* diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index c1fede6ec934..42761ebec9f7 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -73,8 +73,8 @@ ftrace_modify_code(unsigned long ip, struct ppc_inst old, struct ppc_inst new) /* Make sure it is what we expect it to be */ if (!ppc_inst_equal(replaced, old)) { - pr_err("%p: replaced (%#x) != old (%#x)", - (void *)ip, ppc_inst_val(replaced), ppc_inst_val(old)); + pr_err("%p: replaced (%s) != old (%s)", + (void *)ip, ppc_inst_as_str(replaced), ppc_inst_as_str(old)); return -EINVAL; } @@ -137,7 +137,7 @@ __ftrace_make_nop(struct module *mod, /* Make sure that that this is still a 24bit jump */ if (!is_bl_op(op)) { - pr_err("Not expected bl: opcode is %x\n", ppc_inst_val(op)); + pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op)); return -EINVAL; } @@ -172,8 +172,8 @@ __ftrace_make_nop(struct module *mod, /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */ if (!ppc_inst_equal(op, ppc_inst(PPC_INST_MFLR)) && !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) { - pr_err("Unexpected instruction %08x around bl _mcount\n", - ppc_inst_val(op)); + pr_err("Unexpected instruction %s around bl _mcount\n", + ppc_inst_as_str(op)); return -EINVAL; } #else @@ -203,7 +203,7 @@ __ftrace_make_nop(struct module *mod, } if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) { - pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, ppc_inst_val(op)); + pr_err("Expected %08x found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op)); return -EINVAL; } #endif /* CONFIG_MPROFILE_KERNEL */ @@ -231,7 +231,7 @@ __ftrace_make_nop(struct module *mod, /* Make sure that that this is 
still a 24bit jump */ if (!is_bl_op(op)) { - pr_err("Not expected bl: opcode is %x\n", ppc_inst_val(op)); + pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op)); return -EINVAL; } @@ -406,7 +406,7 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr) /* Make sure that that this is still a 24bit jump */ if (!is_bl_op(op)) { - pr_err("Not expected bl: opcode is %x\n", ppc_inst_val(op)); + pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op)); return -EINVAL; } @@ -533,8 +533,8 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return -EFAULT; if (!expected_nop_sequence(ip, op[0], op[1])) { - pr_err("Unexpected call sequence at %p: %x %x\n", - ip, ppc_inst_val(op[0]), ppc_inst_val(op[1])); + pr_err("Unexpected call sequence at %p: %s %s\n", + ip, ppc_inst_as_str(op[0]), ppc_inst_as_str(op[1])); return -EINVAL; } @@ -597,7 +597,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) /* It should be pointing to a nop */ if (!ppc_inst_equal(op, ppc_inst(PPC_INST_NOP))) { - pr_err("Expected NOP but have %x\n", ppc_inst_val(op)); + pr_err("Expected NOP but have %s\n", ppc_inst_as_str(op)); return -EINVAL; } @@ -654,7 +654,7 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr) } if (!ppc_inst_equal(op, ppc_inst(PPC_INST_NOP))) { - pr_err("Unexpected call sequence at %p: %x\n", ip, ppc_inst_val(op)); + pr_err("Unexpected call sequence at %p: %s\n", ip, ppc_inst_as_str(op)); return -EINVAL; } @@ -733,7 +733,7 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, /* Make sure that that this is still a 24bit jump */ if (!is_bl_op(op)) { - pr_err("Not expected bl: opcode is %x\n", ppc_inst_val(op)); + pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op)); return -EINVAL; } diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index c9a1a343123d..e25e92750416 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -1147,7 +1147,7 @@ static int __init emulate_compute_instr(struct pt_regs *regs, if (analyse_instr(&op, regs, instr) != 1 || GETTYPE(op.type) != COMPUTE) { - pr_info("emulation failed, instruction = 0x%08x\n", ppc_inst_val(instr)); + pr_info("emulation failed, instruction = %s\n", ppc_inst_as_str(instr)); return -EFAULT; } @@ -1167,7 +1167,7 @@ static int __init execute_compute_instr(struct pt_regs *regs, /* Patch the NOP with the actual instruction */ patch_instruction_site(&patch__exec_instr, instr); if (exec_instr(regs)) { - pr_info("execution failed, instruction = 0x%08x\n", ppc_inst_val(instr)); + pr_info("execution failed, instruction = %s\n", ppc_inst_as_str(instr)); return -EFAULT; } diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index f08b2ef1995a..8df83a744528 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -2977,7 +2977,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr, dotted = 0; last_inst = inst; if (praddr) - printf(REG" %.8x", adr, ppc_inst_val(inst)); + printf(REG" %s", adr, ppc_inst_as_str(inst)); printf("\t"); dump_func(ppc_inst_val(inst), adr); printf("\n"); -- cgit v1.2.3 From 8b98afc117aaf825c66d7ddd59f1849e559b42cd Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Tue, 2 Jun 2020 15:27:26 +1000 Subject: powerpc/xmon: Improve dumping prefixed instructions Currently prefixed instructions are dumped as two separate word instructions.
Use mread_instr() so that prefixed instructions are read as such and update the incrementor in the loop to take this into account. 'dump_func' is print_insn_powerpc() which comes from ppc-dis.c which is taken from binutils. When this is updated prefixed instructions will be disassembled. Currently dumping prefixed instructions looks like this: 0:mon> di c000000000094168 c000000000094168 0x06000000 .long 0x6000000 c00000000009416c 0x392a0003 addi r9,r10,3 c000000000094170 0x913f0028 stw r9,40(r31) c000000000094174 0xe93f002a lwa r9,40(r31) c000000000094178 0x7d234b78 mr r3,r9 c00000000009417c 0x383f0040 addi r1,r31,64 c000000000094180 0xebe1fff8 ld r31,-8(r1) c000000000094184 0x4e800020 blr c000000000094188 0x60000000 nop ... c000000000094190 0x3c4c0121 addis r2,r12,289 c000000000094194 0x38429670 addi r2,r2,-27024 c000000000094198 0x7c0802a6 mflr r0 c00000000009419c 0x60000000 nop c0000000000941a0 0xe9240100 ld r9,256(r4) c0000000000941a4 0x39400001 li r10,1 After this it looks like: 0:mon> di c000000000094168 c000000000094168 0x06000000 0x392a0003 .long 0x392a000306000000 c000000000094170 0x913f0028 stw r9,40(r31) c000000000094174 0xe93f002a lwa r9,40(r31) c000000000094178 0x7d234b78 mr r3,r9 c00000000009417c 0x383f0040 addi r1,r31,64 c000000000094180 0xebe1fff8 ld r31,-8(r1) c000000000094184 0x4e800020 blr c000000000094188 0x60000000 nop ... c000000000094190 0x3c4c0121 addis r2,r12,289 c000000000094194 0x38429570 addi r2,r2,-27280 c000000000094198 0x7c0802a6 mflr r0 c00000000009419c 0x60000000 nop c0000000000941a0 0xe9240100 ld r9,256(r4) c0000000000941a4 0x39400001 li r10,1 c0000000000941a8 0x3d02000b addis r8,r2,11 Signed-off-by: Jordan Niethe Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200602052728.18227-2-jniethe5@gmail.com --- arch/powerpc/xmon/xmon.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 8df83a744528..1a240b4b76f9 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -2954,11 +2954,10 @@ generic_inst_dump(unsigned long adr, long count, int praddr, int nr, dotted; unsigned long first_adr; struct ppc_inst inst, last_inst = ppc_inst(0); - unsigned char val[4]; dotted = 0; - for (first_adr = adr; count > 0; --count, adr += 4) { - nr = mread(adr, val, 4); + for (first_adr = adr; count > 0; --count, adr += ppc_inst_len(inst)) { + nr = mread_instr(adr, &inst); if (nr == 0) { if (praddr) { const char *x = fault_chars[fault_type]; @@ -2966,7 +2965,6 @@ generic_inst_dump(unsigned long adr, long count, int praddr, } break; } - inst = ppc_inst(GETWORD(val)); if (adr > first_adr && ppc_inst_equal(inst, last_inst)) { if (!dotted) { printf(" ...\n"); @@ -2979,7 +2977,10 @@ generic_inst_dump(unsigned long adr, long count, int praddr, if (praddr) printf(REG" %s", adr, ppc_inst_as_str(inst)); printf("\t"); - dump_func(ppc_inst_val(inst), adr); + if (!ppc_inst_prefixed(inst)) + dump_func(ppc_inst_val(inst), adr); + else + dump_func(ppc_inst_as_u64(inst), adr); printf("\n"); } return adr - first_adr; -- cgit v1.2.3 From 93c3a0ba2a0863a5c82a518d64044434f82a57f5 Mon Sep 17 00:00:00 2001 From: Balamuruhan S Date: Fri, 26 Jun 2020 15:21:55 +0530 Subject: powerpc/test_emulate_step: Enhancement to test negative scenarios add provision to declare test is a negative scenario, verify whether emulation fails and avoid executing it. 
Signed-off-by: Balamuruhan S Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200626095158.1031507-2-bala24@linux.ibm.com --- arch/powerpc/lib/test_emulate_step.c | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index e25e92750416..31065e986ff0 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -18,6 +18,7 @@ #define IGNORE_GPR(n) (0x1UL << (n)) #define IGNORE_XER (0x1UL << 32) #define IGNORE_CCR (0x1UL << 33) +#define NEGATIVE_TEST (0x1UL << 63) #define TEST_PLD(r, base, i, pr) \ ppc_inst_prefix(PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_H(i), \ @@ -1135,8 +1136,10 @@ static struct compute_test compute_tests[] = { }; static int __init emulate_compute_instr(struct pt_regs *regs, - struct ppc_inst instr) + struct ppc_inst instr, + bool negative) { + int analysed; extern s32 patch__exec_instr; struct instruction_op op; @@ -1145,13 +1148,17 @@ static int __init emulate_compute_instr(struct pt_regs *regs, regs->nip = patch_site_addr(&patch__exec_instr); - if (analyse_instr(&op, regs, instr) != 1 || - GETTYPE(op.type) != COMPUTE) { - pr_info("emulation failed, instruction = %s\n", ppc_inst_as_str(instr)); + analysed = analyse_instr(&op, regs, instr); + if (analysed != 1 || GETTYPE(op.type) != COMPUTE) { + if (negative) + return -EFAULT; + pr_info("emulation failed, instruction = %s\n", ppc_inst_as_str(instr)); return -EFAULT; } - - emulate_update_regs(regs, &op); + if (analysed == 1 && negative) + pr_info("negative test failed, instruction = %s\n", ppc_inst_as_str(instr)); + if (!negative) + emulate_update_regs(regs, &op); return 0; } @@ -1189,7 +1196,7 @@ static void __init run_tests_compute(void) struct pt_regs *regs, exp, got; unsigned int i, j, k; struct ppc_inst instr; - bool ignore_gpr, ignore_xer, ignore_ccr, passed; + bool ignore_gpr, ignore_xer, ignore_ccr, passed, rc, negative; for (i = 0; i < ARRAY_SIZE(compute_tests); i++) { test = &compute_tests[i]; @@ -1203,6 +1210,7 @@ static void __init run_tests_compute(void) instr = test->subtests[j].instr; flags = test->subtests[j].flags; regs = &test->subtests[j].regs; + negative = flags & NEGATIVE_TEST; ignore_xer = flags & IGNORE_XER; ignore_ccr = flags & IGNORE_CCR; passed = true; @@ -1217,8 +1225,12 @@ static void __init run_tests_compute(void) exp.msr = MSR_KERNEL; got.msr = MSR_KERNEL; - if (emulate_compute_instr(&got, instr) || - execute_compute_instr(&exp, instr)) { + rc = emulate_compute_instr(&got, instr, negative) != 0; + if (negative) { + /* skip executing instruction */ + passed = rc; + goto print; + } else if (rc || execute_compute_instr(&exp, instr)) { passed = false; goto print; } -- cgit v1.2.3 From 7e67c73b939b25d4ad18a536e52282aa35d8ee56 Mon Sep 17 00:00:00 2001 From: Balamuruhan S Date: Fri, 26 Jun 2020 15:21:56 +0530 Subject: powerpc/test_emulate_step: Add negative tests for prefixed addi testcases for `paddi` instruction to cover the negative case, if R is equal to 1 and RA is not equal to 0, the instruction form is invalid.
Signed-off-by: Balamuruhan S Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200626095158.1031507-3-bala24@linux.ibm.com --- arch/powerpc/lib/test_emulate_step.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index 31065e986ff0..7d11ab5ef86c 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -1130,6 +1130,16 @@ static struct compute_test compute_tests[] = { .regs = { .gpr[21] = 0, } + }, + /* Invalid instruction form with R = 1 and RA != 0 */ + { + .descr = "RA = R22(0), SI = 0, R = 1", + .instr = TEST_PADDI(21, 22, 0, 1), + .flags = NEGATIVE_TEST, + .regs = { + .gpr[21] = 0, + .gpr[22] = 0, + } } } } -- cgit v1.2.3 From 68a180a44c29d7e918ae7d3c18a01b0751d1c22f Mon Sep 17 00:00:00 2001 From: Balamuruhan S Date: Fri, 26 Jun 2020 15:21:57 +0530 Subject: powerpc/sstep: Introduce macros to retrieve Prefix instruction operands retrieve prefix instruction operands RA and pc relative bit R values using macros and adopt it in sstep.c and test_emulate_step.c. Signed-off-by: Balamuruhan S Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200626095158.1031507-4-bala24@linux.ibm.com --- arch/powerpc/include/asm/sstep.h | 4 ++++ arch/powerpc/lib/sstep.c | 12 ++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h index 3b01c69a44aa..325975b4ef30 100644 --- a/arch/powerpc/include/asm/sstep.h +++ b/arch/powerpc/include/asm/sstep.h @@ -104,6 +104,10 @@ enum instruction_type { #define MKOP(t, f, s) ((t) | (f) | SIZE(s)) +/* Prefix instruction operands */ +#define GET_PREFIX_RA(i) (((i) >> 16) & 0x1f) +#define GET_PREFIX_R(i) ((i) & (1ul << 20)) + struct instruction_op { int type; int reg; diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 5abe98216dc2..fb4c5767663d 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -200,8 +200,8 @@ static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr, unsigned int dd; unsigned long ea, d0, d1, d; - prefix_r = instr & (1ul << 20); - ra = (suffix >> 16) & 0x1f; + prefix_r = GET_PREFIX_R(instr); + ra = GET_PREFIX_RA(suffix); d0 = instr & 0x3ffff; d1 = suffix & 0xffff; @@ -1339,8 +1339,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, switch (opcode) { #ifdef __powerpc64__ case 1: - prefix_r = word & (1ul << 20); - ra = (suffix >> 16) & 0x1f; + prefix_r = GET_PREFIX_R(word); + ra = GET_PREFIX_RA(suffix); rd = (suffix >> 21) & 0x1f; op->reg = rd; op->val = regs->gpr[rd]; @@ -2715,8 +2715,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, } break; case 1: /* Prefixed instructions */ - prefix_r = word & (1ul << 20); - ra = (suffix >> 16) & 0x1f; + prefix_r = GET_PREFIX_R(word); + ra = GET_PREFIX_RA(suffix); op->update_reg = ra; rd = (suffix >> 21) & 0x1f; op->reg = rd; -- cgit v1.2.3 From e93ad65e3611b06288efdf0cfd76c012df3feec1 Mon Sep 17 00:00:00 2001 From: Balamuruhan S Date: Fri, 26 Jun 2020 15:21:58 +0530 Subject: powerpc/test_emulate_step: Move extern declaration to sstep.h fix checkpatch.pl warnings by moving extern declaration from source file to headerfile. 
Signed-off-by: Balamuruhan S Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200626095158.1031507-5-bala24@linux.ibm.com --- arch/powerpc/include/asm/sstep.h | 2 ++ arch/powerpc/lib/test_emulate_step.c | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h index 325975b4ef30..c8e37ef060c1 100644 --- a/arch/powerpc/include/asm/sstep.h +++ b/arch/powerpc/include/asm/sstep.h @@ -108,6 +108,8 @@ enum instruction_type { #define GET_PREFIX_RA(i) (((i) >> 16) & 0x1f) #define GET_PREFIX_R(i) ((i) & (1ul << 20)) +extern s32 patch__exec_instr; + struct instruction_op { int type; int reg; diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index 7d11ab5ef86c..081b05480c47 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -1150,7 +1150,6 @@ static int __init emulate_compute_instr(struct pt_regs *regs, bool negative) { int analysed; - extern s32 patch__exec_instr; struct instruction_op op; if (!regs || !ppc_inst_val(instr)) @@ -1176,7 +1175,6 @@ static int __init execute_compute_instr(struct pt_regs *regs, struct ppc_inst instr) { extern int exec_instr(struct pt_regs *regs); - extern s32 patch__exec_instr; if (!regs || !ppc_inst_val(instr)) return -EINVAL; -- cgit v1.2.3 From 147c13413c04bc6a2bd76f2503402905e5e98cff Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Thu, 16 Jul 2020 12:38:20 -0700 Subject: powerpc/64: Fix an out of date comment about MMIO ordering This primitive has been renamed, but because it was spelled incorrectly in the first place it must have escaped the fixup patch. As far as I can tell this logic is still correct: smp_mb__after_spinlock() uses the default smp_mb() implementation, which is "sync" rather than "hwsync" but those are the same (though I'm not that familiar with PowerPC). Signed-off-by: Palmer Dabbelt Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200716193820.1141936-1-palmer@dabbelt.com --- arch/powerpc/kernel/entry_64.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index da85c2511e57..2547c5dac07a 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -354,7 +354,7 @@ _GLOBAL(_switch) * kernel/sched/core.c). * * Uncacheable stores in the case of involuntary preemption must - * be taken care of. The smp_mb__before_spin_lock() in __schedule() + * be taken care of. The smp_mb__after_spinlock() in __schedule() * is implemented as hwsync on powerpc, which orders MMIO too. So * long as there is an hwsync in the context switch path, it will * be executed on the source CPU after the task has performed -- cgit v1.2.3 From 2384b36f9156c3b815a5ce5f694edc5054ab7625 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 16 Jul 2020 11:35:22 +1000 Subject: powerpc: Select ARCH_HAS_MEMBARRIER_SYNC_CORE powerpc return from interrupt and return from system call sequences are context synchronising. 
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200716013522.338318-1-npiggin@gmail.com --- .../features/sched/membarrier-sync-core/arch-support.txt | 4 ++-- arch/powerpc/Kconfig | 1 + arch/powerpc/include/asm/exception-64e.h | 6 +++++- arch/powerpc/include/asm/exception-64s.h | 8 ++++++++ arch/powerpc/kernel/entry_32.S | 6 ++++++ 5 files changed, 22 insertions(+), 3 deletions(-) diff --git a/Documentation/features/sched/membarrier-sync-core/arch-support.txt b/Documentation/features/sched/membarrier-sync-core/arch-support.txt index 8a521a622966..52ad74a25f54 100644 --- a/Documentation/features/sched/membarrier-sync-core/arch-support.txt +++ b/Documentation/features/sched/membarrier-sync-core/arch-support.txt @@ -5,7 +5,7 @@ # # Architecture requirements # -# * arm/arm64 +# * arm/arm64/powerpc # # Rely on implicit context synchronization as a result of exception return # when returning from IPI handler, and when returning to user-space. @@ -45,7 +45,7 @@ | nios2: | TODO | | openrisc: | TODO | | parisc: | TODO | - | powerpc: | TODO | + | powerpc: | ok | | riscv: | TODO | | s390: | TODO | | sh: | TODO | diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 70adf69f8b1a..81c0dee1cbff 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -131,6 +131,7 @@ config PPC select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64 select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_MEMBARRIER_CALLBACKS + select ARCH_HAS_MEMBARRIER_SYNC_CORE select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64 select ARCH_HAS_STRICT_KERNEL_RWX if (PPC32 && !HIBERNATION) select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h index 54a98ef7d7fe..72b6657acd2d 100644 --- a/arch/powerpc/include/asm/exception-64e.h +++ b/arch/powerpc/include/asm/exception-64e.h @@ -204,7 +204,11 @@ exc_##label##_book3e: LOAD_REG_ADDR(r3,interrupt_base_book3e);\ ori r3,r3,vector_offset@l; \ mtspr SPRN_IVOR##vector_number,r3; - +/* + * powerpc relies on return from interrupt/syscall being context synchronising + * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional + * synchronisation instructions. + */ #define RFI_TO_KERNEL \ rfi diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 47bd4ea0837d..d7a1a427a690 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -68,6 +68,14 @@ * * The nop instructions allow us to insert one or more instructions to flush the * L1-D cache when returning to userspace or a guest. + * + * powerpc relies on return from interrupt/syscall being context synchronising + * (which hrfid, rfid, and rfscv are) to support ARCH_HAS_MEMBARRIER_SYNC_CORE + * without additional synchronisation instructions. + * + * soft-masked interrupt replay does not include a context-synchronising rfid, + * but those always return to kernel, the sync is only required when returning + * to user. 
*/ #define RFI_FLUSH_SLOT \ RFI_FLUSH_FIXUP_SECTION; \ diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 217ebdf5b00b..f4d0af8e1136 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -35,6 +35,12 @@ #include "head_32.h" +/* + * powerpc relies on return from interrupt/syscall being context synchronising + * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional + * synchronisation instructions. + */ + /* * Align to 4k in order to ensure that all functions modyfing srr0/srr1 * fit into one page in order to not encounter a TLB miss between the -- cgit v1.2.3 From 5a090f7c363fdc09b99222eae679506a58e7cc68 Mon Sep 17 00:00:00 2001 From: Wen Xiong Date: Mon, 13 Jul 2020 09:39:33 -0500 Subject: powerpc/pseries: PCIE PHB reset Several device drivers hit EEH(Extended Error handling) when triggering kdump on Pseries PowerVM. This patch implemented a reset of the PHBs in pci general code when triggering kdump. PHB reset stop all PCI transactions from normal kernel. We have tested the patch in several enviroments: - direct slot adapters - adapters under the switch - a VF adapter in PowerVM - a VF adapter/adapter in KVM guest. Signed-off-by: Wen Xiong [mpe: Fix broken whitespace, subject & SOB formatting] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1594651173-32166-1-git-send-email-wenxiong@linux.vnet.ibm.com --- arch/powerpc/platforms/pseries/eeh_pseries.c | 232 +++++++++++++++++++-------- 1 file changed, 169 insertions(+), 63 deletions(-) diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index ace117f99d94..47760fb515cc 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -80,6 +81,152 @@ void pseries_pcibios_bus_add_device(struct pci_dev *pdev) eeh_probe_device(pdev); } + +/** + * pseries_eeh_get_config_addr - Retrieve config address + * + * Retrieve the assocated config address. Actually, there're 2 RTAS + * function calls dedicated for the purpose. We need implement + * it through the new function and then the old one. Besides, + * you should make sure the config address is figured out from + * FDT node before calling the function. + * + * It's notable that zero'ed return value means invalid PE config + * address. + */ +static int pseries_eeh_get_config_addr(struct pci_controller *phb, int config_addr) +{ + int ret = 0; + int rets[3]; + + if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) { + /* + * First of all, we need to make sure there has one PE + * associated with the device. Otherwise, PE address is + * meaningless. 
+ */ + ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, + config_addr, BUID_HI(phb->buid), + BUID_LO(phb->buid), 1); + if (ret || (rets[0] == 0)) + return 0; + + /* Retrieve the associated PE config address */ + ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, + config_addr, BUID_HI(phb->buid), + BUID_LO(phb->buid), 0); + if (ret) { + pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n", + __func__, phb->global_number, config_addr); + return 0; + } + + return rets[0]; + } + + if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) { + ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets, + config_addr, BUID_HI(phb->buid), + BUID_LO(phb->buid), 0); + if (ret) { + pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n", + __func__, phb->global_number, config_addr); + return 0; + } + + return rets[0]; + } + + return ret; +} + +/** + * pseries_eeh_phb_reset - Reset the specified PHB + * @phb: PCI controller + * @config_adddr: the associated config address + * @option: reset option + * + * Reset the specified PHB/PE + */ +static int pseries_eeh_phb_reset(struct pci_controller *phb, int config_addr, int option) +{ + int ret; + + /* Reset PE through RTAS call */ + ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL, + config_addr, BUID_HI(phb->buid), + BUID_LO(phb->buid), option); + + /* If fundamental-reset not supported, try hot-reset */ + if (option == EEH_RESET_FUNDAMENTAL && + ret == -8) { + option = EEH_RESET_HOT; + ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL, + config_addr, BUID_HI(phb->buid), + BUID_LO(phb->buid), option); + } + + /* We need reset hold or settlement delay */ + if (option == EEH_RESET_FUNDAMENTAL || + option == EEH_RESET_HOT) + msleep(EEH_PE_RST_HOLD_TIME); + else + msleep(EEH_PE_RST_SETTLE_TIME); + + return ret; +} + +/** + * pseries_eeh_phb_configure_bridge - Configure PCI bridges in the indicated PE + * @phb: PCI controller + * @config_adddr: the associated config address + * + * The function will be called to reconfigure the bridges included + * in the specified PE so that the mulfunctional PE would be recovered + * again. + */ +static int pseries_eeh_phb_configure_bridge(struct pci_controller *phb, int config_addr) +{ + int ret; + /* Waiting 0.2s maximum before skipping configuration */ + int max_wait = 200; + + while (max_wait > 0) { + ret = rtas_call(ibm_configure_pe, 3, 1, NULL, + config_addr, BUID_HI(phb->buid), + BUID_LO(phb->buid)); + + if (!ret) + return ret; + if (ret < 0) + break; + + /* + * If RTAS returns a delay value that's above 100ms, cut it + * down to 100ms in case firmware made a mistake. For more + * on how these delay values work see rtas_busy_delay_time + */ + if (ret > RTAS_EXTENDED_DELAY_MIN+2 && + ret <= RTAS_EXTENDED_DELAY_MAX) + ret = RTAS_EXTENDED_DELAY_MIN+2; + + max_wait -= rtas_busy_delay_time(ret); + + if (max_wait < 0) + break; + + rtas_busy_delay(ret); + } + + pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n", + __func__, phb->global_number, config_addr, ret); + /* PAPR defines -3 as "Parameter Error" for this function: */ + if (ret == -3) + return -EINVAL; + else + return -EIO; +} + /* * Buffer for reporting slot-error-detail rtas calls. 
Its here * in BSS, and not dynamically alloced, so that it ends up in @@ -96,6 +243,10 @@ static int eeh_error_buf_size; */ static int pseries_eeh_init(void) { + struct pci_controller *phb; + struct pci_dn *pdn; + int addr, config_addr; + /* figure out EEH RTAS function call tokens */ ibm_set_eeh_option = rtas_token("ibm,set-eeh-option"); ibm_set_slot_reset = rtas_token("ibm,set-slot-reset"); @@ -148,6 +299,22 @@ static int pseries_eeh_init(void) /* Set EEH machine dependent code */ ppc_md.pcibios_bus_add_device = pseries_pcibios_bus_add_device; + if (is_kdump_kernel() || reset_devices) { + pr_info("Issue PHB reset ...\n"); + list_for_each_entry(phb, &hose_list, list_node) { + pdn = list_first_entry(&PCI_DN(phb->dn)->child_list, struct pci_dn, list); + addr = (pdn->busno << 16) | (pdn->devfn << 8); + config_addr = pseries_eeh_get_config_addr(phb, addr); + /* invalid PE config addr */ + if (config_addr == 0) + continue; + + pseries_eeh_phb_reset(phb, config_addr, EEH_RESET_FUNDAMENTAL); + pseries_eeh_phb_reset(phb, config_addr, EEH_RESET_DEACTIVATE); + pseries_eeh_phb_configure_bridge(phb, config_addr); + } + } + return 0; } @@ -569,35 +736,13 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay) static int pseries_eeh_reset(struct eeh_pe *pe, int option) { int config_addr; - int ret; /* Figure out PE address */ config_addr = pe->config_addr; if (pe->addr) config_addr = pe->addr; - /* Reset PE through RTAS call */ - ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL, - config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid), option); - - /* If fundamental-reset not supported, try hot-reset */ - if (option == EEH_RESET_FUNDAMENTAL && - ret == -8) { - option = EEH_RESET_HOT; - ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL, - config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid), option); - } - - /* We need reset hold or settlement delay */ - if (option == EEH_RESET_FUNDAMENTAL || - option == EEH_RESET_HOT) - msleep(EEH_PE_RST_HOLD_TIME); - else - msleep(EEH_PE_RST_SETTLE_TIME); - - return ret; + return pseries_eeh_phb_reset(pe->phb, config_addr, option); } /** @@ -641,56 +786,17 @@ static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, u * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE * @pe: EEH PE * - * The function will be called to reconfigure the bridges included - * in the specified PE so that the mulfunctional PE would be recovered - * again. */ static int pseries_eeh_configure_bridge(struct eeh_pe *pe) { int config_addr; - int ret; - /* Waiting 0.2s maximum before skipping configuration */ - int max_wait = 200; /* Figure out the PE address */ config_addr = pe->config_addr; if (pe->addr) config_addr = pe->addr; - while (max_wait > 0) { - ret = rtas_call(ibm_configure_pe, 3, 1, NULL, - config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid)); - - if (!ret) - return ret; - if (ret < 0) - break; - - /* - * If RTAS returns a delay value that's above 100ms, cut it - * down to 100ms in case firmware made a mistake. 
For more - * on how these delay values work see rtas_busy_delay_time - */ - if (ret > RTAS_EXTENDED_DELAY_MIN+2 && - ret <= RTAS_EXTENDED_DELAY_MAX) - ret = RTAS_EXTENDED_DELAY_MIN+2; - - max_wait -= rtas_busy_delay_time(ret); - - if (max_wait < 0) - break; - - rtas_busy_delay(ret); - } - - pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n", - __func__, pe->phb->global_number, pe->addr, ret); - /* PAPR defines -3 as "Parameter Error" for this function: */ - if (ret == -3) - return -EINVAL; - else - return -EIO; + return pseries_eeh_phb_configure_bridge(pe->phb, config_addr); } /** -- cgit v1.2.3 From 201220bb0e8cbc163ec7f550b3b7b3da46eb5877 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 3 Jul 2020 09:33:43 +1000 Subject: powerpc/powernv: Machine check handler for POWER10 Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200702233343.1128026-1-npiggin@gmail.com --- arch/powerpc/include/asm/mce.h | 1 + arch/powerpc/kernel/dt_cpu_ftrs.c | 10 +++++ arch/powerpc/kernel/mce.c | 1 + arch/powerpc/kernel/mce_power.c | 84 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 96 insertions(+) diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h index 7bdd0cd4f2de..adf2cda67f9a 100644 --- a/arch/powerpc/include/asm/mce.h +++ b/arch/powerpc/include/asm/mce.h @@ -86,6 +86,7 @@ enum MCE_TlbErrorType { enum MCE_UserErrorType { MCE_USER_ERROR_INDETERMINATE = 0, MCE_USER_ERROR_TLBIE = 1, + MCE_USER_ERROR_SCV = 2, }; enum MCE_RaErrorType { diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 0d65d9597778..0a16bb6fa07d 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -66,6 +66,7 @@ struct dt_cpu_feature { extern long __machine_check_early_realmode_p8(struct pt_regs *regs); extern long __machine_check_early_realmode_p9(struct pt_regs *regs); +extern long __machine_check_early_realmode_p10(struct pt_regs *regs); static int hv_mode; @@ -475,6 +476,14 @@ static int __init feat_enable_pmu_power10(struct dt_cpu_feature *f) return 1; } +static int __init feat_enable_mce_power10(struct dt_cpu_feature *f) +{ + cur_cpu_spec->platform = "power10"; + cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p10; + + return 1; +} + static int __init feat_enable_tm(struct dt_cpu_feature *f) { #ifdef CONFIG_PPC_TRANSACTIONAL_MEM @@ -663,6 +672,7 @@ static struct dt_cpu_feature_match __initdata {"group-start-register", feat_enable, 0}, {"pc-relative-addressing", feat_enable, 0}, {"machine-check-power9", feat_enable_mce_power9, 0}, + {"machine-check-power10", feat_enable_mce_power10, 0}, {"performance-monitor-power9", feat_enable_pmu_power9, 0}, {"performance-monitor-power10", feat_enable_pmu_power10, 0}, {"event-based-branch-v3", feat_enable, 0}, diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index b7b3ed4e6193..c2fa8ee63720 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -385,6 +385,7 @@ void machine_check_print_event_info(struct machine_check_event *evt, static const char *mc_user_types[] = { "Indeterminate", "tlbie(l) invalid", + "scv invalid", }; static const char *mc_ra_types[] = { "Indeterminate", diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index c3b522bff9b4..b7e173754a2e 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c @@ -243,6 +243,45 @@ static const struct mce_ierror_table mce_p9_ierror_table[] = { MCE_INITIATOR_CPU, 
MCE_SEV_SEVERE, true }, { 0, 0, 0, 0, 0, 0, 0 } }; +static const struct mce_ierror_table mce_p10_ierror_table[] = { +{ 0x00000000081c0000, 0x0000000000040000, true, + MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH, MCE_ECLASS_HARDWARE, + MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true }, +{ 0x00000000081c0000, 0x0000000000080000, true, + MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY, MCE_ECLASS_HARD_INDETERMINATE, + MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true }, +{ 0x00000000081c0000, 0x00000000000c0000, true, + MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE, + MCE_INITIATOR_CPU, MCE_SEV_WARNING, true }, +{ 0x00000000081c0000, 0x0000000000100000, true, + MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE, + MCE_INITIATOR_CPU, MCE_SEV_WARNING, true }, +{ 0x00000000081c0000, 0x0000000000140000, true, + MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE, + MCE_INITIATOR_CPU, MCE_SEV_WARNING, true }, +{ 0x00000000081c0000, 0x0000000000180000, true, + MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH, MCE_ECLASS_HARDWARE, + MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true }, +{ 0x00000000081c0000, 0x00000000001c0000, true, + MCE_ERROR_TYPE_RA, MCE_RA_ERROR_IFETCH_FOREIGN, MCE_ECLASS_SOFTWARE, + MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true }, +{ 0x00000000081c0000, 0x0000000008080000, true, + MCE_ERROR_TYPE_USER,MCE_USER_ERROR_SCV, MCE_ECLASS_SOFTWARE, + MCE_INITIATOR_CPU, MCE_SEV_WARNING, true }, +{ 0x00000000081c0000, 0x00000000080c0000, true, + MCE_ERROR_TYPE_RA, MCE_RA_ERROR_IFETCH, MCE_ECLASS_SOFTWARE, + MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true }, +{ 0x00000000081c0000, 0x0000000008100000, true, + MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH, MCE_ECLASS_SOFTWARE, + MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true }, +{ 0x00000000081c0000, 0x0000000008140000, false, + MCE_ERROR_TYPE_RA, MCE_RA_ERROR_STORE, MCE_ECLASS_HARDWARE, + MCE_INITIATOR_CPU, MCE_SEV_FATAL, false }, /* ASYNC is fatal */ +{ 0x00000000081c0000, 0x00000000081c0000, true, + MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN, MCE_ECLASS_HARDWARE, + MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true }, +{ 0, 0, 0, 0, 0, 0, 0 } }; + struct mce_derror_table { unsigned long dsisr_value; bool dar_valid; /* dar is a valid indicator of faulting address */ @@ -361,6 +400,46 @@ static const struct mce_derror_table mce_p9_derror_table[] = { MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true }, { 0, false, 0, 0, 0, 0, 0 } }; +static const struct mce_derror_table mce_p10_derror_table[] = { +{ 0x00008000, false, + MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE, MCE_ECLASS_HARDWARE, + MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true }, +{ 0x00004000, true, + MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE, + MCE_ECLASS_HARDWARE, + MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true }, +{ 0x00000800, true, + MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE, + MCE_INITIATOR_CPU, MCE_SEV_WARNING, true }, +{ 0x00000400, true, + MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE, + MCE_INITIATOR_CPU, MCE_SEV_WARNING, true }, +{ 0x00000200, false, + MCE_ERROR_TYPE_USER, MCE_USER_ERROR_TLBIE, MCE_ECLASS_SOFTWARE, + MCE_INITIATOR_CPU, MCE_SEV_WARNING, true }, +{ 0x00000080, true, + MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */ + MCE_ECLASS_SOFT_INDETERMINATE, + MCE_INITIATOR_CPU, MCE_SEV_WARNING, true }, +{ 0x00000100, true, + MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY, MCE_ECLASS_HARD_INDETERMINATE, + MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true }, +{ 0x00000040, true, 
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD, MCE_ECLASS_HARDWARE, + MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true }, +{ 0x00000020, false, + MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE, + MCE_ECLASS_HARDWARE, + MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true }, +{ 0x00000010, false, + MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN, + MCE_ECLASS_HARDWARE, + MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true }, +{ 0x00000008, false, + MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD_STORE_FOREIGN, MCE_ECLASS_HARDWARE, + MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true }, +{ 0, false, 0, 0, 0, 0, 0 } }; + static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr, uint64_t *phys_addr) { @@ -657,3 +736,8 @@ long __machine_check_early_realmode_p9(struct pt_regs *regs) return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table); } + +long __machine_check_early_realmode_p10(struct pt_regs *regs) +{ + return mce_handle_error(regs, mce_p10_derror_table, mce_p10_ierror_table); +} -- cgit v1.2.3 From 7c7ff885c7bce40a487e41c68f1dac14dd2c8033 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 10 Jun 2020 10:55:54 +0200 Subject: powerpc/spufs: Fix the type of ret in spufs_arch_write_note Both the ->dump method and snprintf return an int. So switch to an int and properly handle errors from ->dump. Fixes: 5456ffdee666 ("powerpc/spufs: simplify spufs core dumping") Reported-by: kbuild test robot Signed-off-by: Christoph Hellwig Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200610085554.5647-1-hch@lst.de --- arch/powerpc/platforms/cell/spufs/coredump.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c index 40dd7b483f06..026c181a98c5 100644 --- a/arch/powerpc/platforms/cell/spufs/coredump.c +++ b/arch/powerpc/platforms/cell/spufs/coredump.c @@ -118,7 +118,7 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i, size_t sz = spufs_coredump_read[i].size; char fullname[80]; struct elf_note en; - size_t ret; + int ret; sprintf(fullname, "SPU/%d/%s", dfd, spufs_coredump_read[i].name); en.n_namesz = strlen(fullname) + 1; -- cgit v1.2.3 From 0f10228c6ff6af36cbb31af35b01f76cdb0b3fc1 Mon Sep 17 00:00:00 2001 From: Leonardo Bras Date: Mon, 6 Jul 2020 21:48:12 -0300 Subject: KVM: PPC: Fix typo on H_DISABLE_AND_GET hcall On PAPR+ the hcall() on 0x1B0 is called H_DISABLE_AND_GET, but got defined as H_DISABLE_AND_GETC instead. This define was introduced with a typo in commit ("[PATCH] powerpc: Extends HCALL interface for InfiniBand usage"), and was later used without having the typo noticed. 
Signed-off-by: Leonardo Bras Acked-by: Paul Mackerras Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200707004812.190765-1-leobras.c@gmail.com --- arch/powerpc/include/asm/hvcall.h | 2 +- arch/powerpc/kvm/trace_hv.h | 2 +- tools/perf/arch/powerpc/util/book3s_hcalls.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 43486e773bd6..8d7ee3b533c4 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -237,7 +237,7 @@ #define H_CREATE_RPT 0x1A4 #define H_REMOVE_RPT 0x1A8 #define H_REGISTER_RPAGES 0x1AC -#define H_DISABLE_AND_GETC 0x1B0 +#define H_DISABLE_AND_GET 0x1B0 #define H_ERROR_DATA 0x1B4 #define H_GET_HCA_INFO 0x1B8 #define H_GET_PERF_COUNT 0x1BC diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h index 4a61a971c34e..830a126e095d 100644 --- a/arch/powerpc/kvm/trace_hv.h +++ b/arch/powerpc/kvm/trace_hv.h @@ -89,7 +89,7 @@ {H_CREATE_RPT, "H_CREATE_RPT"}, \ {H_REMOVE_RPT, "H_REMOVE_RPT"}, \ {H_REGISTER_RPAGES, "H_REGISTER_RPAGES"}, \ - {H_DISABLE_AND_GETC, "H_DISABLE_AND_GETC"}, \ + {H_DISABLE_AND_GET, "H_DISABLE_AND_GET"}, \ {H_ERROR_DATA, "H_ERROR_DATA"}, \ {H_GET_HCA_INFO, "H_GET_HCA_INFO"}, \ {H_GET_PERF_COUNT, "H_GET_PERF_COUNT"}, \ diff --git a/tools/perf/arch/powerpc/util/book3s_hcalls.h b/tools/perf/arch/powerpc/util/book3s_hcalls.h index 54cfa0530e86..488f4339b83c 100644 --- a/tools/perf/arch/powerpc/util/book3s_hcalls.h +++ b/tools/perf/arch/powerpc/util/book3s_hcalls.h @@ -84,7 +84,7 @@ {0x1a4, "H_CREATE_RPT"}, \ {0x1a8, "H_REMOVE_RPT"}, \ {0x1ac, "H_REGISTER_RPAGES"}, \ - {0x1b0, "H_DISABLE_AND_GETC"}, \ + {0x1b0, "H_DISABLE_AND_GET"}, \ {0x1b4, "H_ERROR_DATA"}, \ {0x1b8, "H_GET_HCA_INFO"}, \ {0x1bc, "H_GET_PERF_COUNT"}, \ -- cgit v1.2.3 From 69507b984ddce803df81215cc7813825189adafa Mon Sep 17 00:00:00 2001 From: Santosh Sivaraj Date: Tue, 21 Jul 2020 14:49:15 +0530 Subject: powerpc/mm/hash64: Remove comment that is no longer valid hash_low_64.S was removed in commit a43c0eb8364c ("powerpc/mm: Convert 4k insert from asm to C") and flush_hash_page() is no longer called from any assembly routine. Signed-off-by: Santosh Sivaraj [mpe: Tweak comment wording] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200721091915.205006-1-santosh@fossix.org --- arch/powerpc/mm/book3s64/hash_utils.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 6f9f346a5f65..9fdabea04990 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -1707,10 +1707,6 @@ unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift, return gslot; } -/* - * WARNING: This is called from hash_low_64.S, if you change this prototype, - * do not forget to update the assembly call site ! - */ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize, unsigned long flags) { -- cgit v1.2.3 From 8747bf36f312356f8a295a0c39ff092d65ce75ae Mon Sep 17 00:00:00 2001 From: Pratik Rajesh Sampat Date: Tue, 21 Jul 2020 21:07:06 +0530 Subject: powerpc/powernv/idle: Replace CPU feature check with PVR check The POWER9 idle driver contains implementation-specific details that means it is not suitable to run on any processor that implements ISA v3.0 (e.g., POWER10), so only init the driver when running on a POWER9. 
Signed-off-by: Pratik Rajesh Sampat [mpe: Use updated change log from Nick] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200721153708.89057-2-psampat@linux.ibm.com --- arch/powerpc/platforms/powernv/idle.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 1c9d0a9d50e0..3a604d7ac91d 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -1223,7 +1223,7 @@ static void __init pnv_probe_idle_states(void) return; } - if (cpu_has_feature(CPU_FTR_ARCH_300)) + if (pvr_version_is(PVR_POWER9)) pnv_power9_idle_init(); for (i = 0; i < nr_pnv_idle_states; i++) -- cgit v1.2.3 From dcbbfa6b05daca94ebcdbce80a7cf05c717d2942 Mon Sep 17 00:00:00 2001 From: Pratik Rajesh Sampat Date: Tue, 21 Jul 2020 21:07:07 +0530 Subject: powerpc/powernv/idle: Rename pnv_first_spr_loss_level variable Replace the variable name from using "pnv_first_spr_loss_level" to "deep_spr_loss_state". pnv_first_spr_loss_level is supposed to be the earliest state that has OPAL_PM_LOSE_FULL_CONTEXT set, in other places the kernel uses the "deep" states as terminology. Hence renaming the variable to be coherent to its semantics. Signed-off-by: Pratik Rajesh Sampat Acked-by: Gautham R. Shenoy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200721153708.89057-3-psampat@linux.ibm.com --- arch/powerpc/platforms/powernv/idle.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 3a604d7ac91d..ed09f1fe16fe 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -48,7 +48,7 @@ static bool default_stop_found; * First stop state levels when SPR and TB loss can occur. */ static u64 pnv_first_tb_loss_level = MAX_STOP_STATE + 1; -static u64 pnv_first_spr_loss_level = MAX_STOP_STATE + 1; +static u64 deep_spr_loss_state = MAX_STOP_STATE + 1; /* * psscr value and mask of the deepest stop idle state. @@ -673,7 +673,7 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on) mmcra = mfspr(SPRN_MMCRA); } - if ((psscr & PSSCR_RL_MASK) >= pnv_first_spr_loss_level) { + if ((psscr & PSSCR_RL_MASK) >= deep_spr_loss_state) { sprs.lpcr = mfspr(SPRN_LPCR); sprs.hfscr = mfspr(SPRN_HFSCR); sprs.fscr = mfspr(SPRN_FSCR); @@ -759,7 +759,7 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on) * just always test PSSCR for SPR/TB state loss. */ pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT; - if (likely(pls < pnv_first_spr_loss_level)) { + if (likely(pls < deep_spr_loss_state)) { if (sprs_saved) atomic_stop_thread_idle(); goto out; @@ -1106,7 +1106,7 @@ static void __init pnv_power9_idle_init(void) * the deepest loss-less (OPAL_PM_STOP_INST_FAST) stop state. 
*/ pnv_first_tb_loss_level = MAX_STOP_STATE + 1; - pnv_first_spr_loss_level = MAX_STOP_STATE + 1; + deep_spr_loss_state = MAX_STOP_STATE + 1; for (i = 0; i < nr_pnv_idle_states; i++) { int err; struct pnv_idle_states_t *state = &pnv_idle_states[i]; @@ -1117,8 +1117,8 @@ static void __init pnv_power9_idle_init(void) pnv_first_tb_loss_level = psscr_rl; if ((state->flags & OPAL_PM_LOSE_FULL_CONTEXT) && - (pnv_first_spr_loss_level > psscr_rl)) - pnv_first_spr_loss_level = psscr_rl; + (deep_spr_loss_state > psscr_rl)) + deep_spr_loss_state = psscr_rl; /* * The idle code does not deal with TB loss occurring @@ -1129,8 +1129,8 @@ static void __init pnv_power9_idle_init(void) * compatibility. */ if ((state->flags & OPAL_PM_TIMEBASE_STOP) && - (pnv_first_spr_loss_level > psscr_rl)) - pnv_first_spr_loss_level = psscr_rl; + (deep_spr_loss_state > psscr_rl)) + deep_spr_loss_state = psscr_rl; err = validate_psscr_val_mask(&state->psscr_val, &state->psscr_mask, @@ -1176,7 +1176,7 @@ static void __init pnv_power9_idle_init(void) } pr_info("cpuidle-powernv: First stop level that may lose SPRs = 0x%llx\n", - pnv_first_spr_loss_level); + deep_spr_loss_state); pr_info("cpuidle-powernv: First stop level that may lose timebase = 0x%llx\n", pnv_first_tb_loss_level); -- cgit v1.2.3 From 5c92fb1b46102e1efe0eed69e743f711bc1c7d2e Mon Sep 17 00:00:00 2001 From: Pratik Rajesh Sampat Date: Tue, 21 Jul 2020 21:07:08 +0530 Subject: powerpc/powernv/idle: Exclude mfspr on HID1, 4, 5 on P9 and above POWER9 onwards the support for the registers HID1, HID4, HID5 has been receded. Although mfspr on the above registers worked in Power9, In Power10 simulator is unrecognized. Moving their assignment under the check for machines lower than Power9 Signed-off-by: Pratik Rajesh Sampat Reviewed-by: Gautham R. Shenoy Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200721153708.89057-4-psampat@linux.ibm.com --- arch/powerpc/platforms/powernv/idle.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index ed09f1fe16fe..77513a80cef9 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -73,9 +73,6 @@ static int pnv_save_sprs_for_deep_states(void) */ uint64_t lpcr_val = mfspr(SPRN_LPCR); uint64_t hid0_val = mfspr(SPRN_HID0); - uint64_t hid1_val = mfspr(SPRN_HID1); - uint64_t hid4_val = mfspr(SPRN_HID4); - uint64_t hid5_val = mfspr(SPRN_HID5); uint64_t hmeer_val = mfspr(SPRN_HMEER); uint64_t msr_val = MSR_IDLE; uint64_t psscr_val = pnv_deepest_stop_psscr_val; @@ -117,6 +114,9 @@ static int pnv_save_sprs_for_deep_states(void) /* Only p8 needs to set extra HID regiters */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) { + uint64_t hid1_val = mfspr(SPRN_HID1); + uint64_t hid4_val = mfspr(SPRN_HID4); + uint64_t hid5_val = mfspr(SPRN_HID5); rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val); if (rc != 0) -- cgit v1.2.3 From 8ac9b9d61f0eceba6ce571e7527798465ae9a7c5 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 22 Jul 2020 15:53:15 +1000 Subject: selftests/powerpc: Add test of memcmp at end of page Update our memcmp selftest, to test the case where we're comparing up to the end of a page and the subsequent page is not mapped. We have to make sure we don't read off the end of the page and cause a fault. We had a bug there in the past, fixed in commit d9470757398a ("powerpc/64: Fix memcmp reading past the end of src/dest"). 
Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200722055315.962391-1-mpe@ellerman.id.au --- .../testing/selftests/powerpc/stringloops/memcmp.c | 40 ++++++++++++---------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/tools/testing/selftests/powerpc/stringloops/memcmp.c b/tools/testing/selftests/powerpc/stringloops/memcmp.c index b1fa7546957f..e4605ca850dc 100644 --- a/tools/testing/selftests/powerpc/stringloops/memcmp.c +++ b/tools/testing/selftests/powerpc/stringloops/memcmp.c @@ -2,6 +2,7 @@ #include #include #include +#include #include #include "utils.h" @@ -13,6 +14,9 @@ #define LARGE_MAX_OFFSET 32 #define LARGE_SIZE_START 4096 +/* This is big enough to fit LARGE_SIZE and works on 4K & 64K kernels */ +#define MAP_SIZE (64 * 1024) + #define MAX_OFFSET_DIFF_S1_S2 48 int vmx_count; @@ -68,25 +72,25 @@ static void test_one(char *s1, char *s2, unsigned long max_offset, static int testcase(bool islarge) { - char *s1; - char *s2; - unsigned long i; - - unsigned long comp_size = (islarge ? LARGE_SIZE : SIZE); - unsigned long alloc_size = comp_size + MAX_OFFSET_DIFF_S1_S2; - int iterations = islarge ? LARGE_ITERATIONS : ITERATIONS; - - s1 = memalign(128, alloc_size); - if (!s1) { - perror("memalign"); - exit(1); - } + unsigned long i, comp_size, alloc_size; + char *p, *s1, *s2; + int iterations; - s2 = memalign(128, alloc_size); - if (!s2) { - perror("memalign"); - exit(1); - } + comp_size = (islarge ? LARGE_SIZE : SIZE); + alloc_size = comp_size + MAX_OFFSET_DIFF_S1_S2; + iterations = islarge ? LARGE_ITERATIONS : ITERATIONS; + + p = mmap(NULL, 4 * MAP_SIZE, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + FAIL_IF(p == MAP_FAILED); + + /* Put s1/s2 at the end of a page */ + s1 = p + MAP_SIZE - alloc_size; + s2 = p + 3 * MAP_SIZE - alloc_size; + + /* And unmap the subsequent page to force a fault if we overread */ + munmap(p + MAP_SIZE, MAP_SIZE); + munmap(p + 3 * MAP_SIZE, MAP_SIZE); srandom(time(0)); -- cgit v1.2.3 From 70cc062c47e7851335ff4c44ba9b362174baf7d4 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 24 Jul 2020 09:02:26 +1000 Subject: powerpc/test_emulate_sstep: Fix build error ppc64_book3e_allmodconfig fails with: arch/powerpc/lib/test_emulate_step.c: In function 'test_pld': arch/powerpc/lib/test_emulate_step.c:113:7: error: implicit declaration of function 'cpu_has_feature' 113 | if (!cpu_has_feature(CPU_FTR_ARCH_31)) { | ^~~~~~~~~~~~~~~ Add an include of cpu_has_feature.h to fix it. Fixes: b6b54b42722a ("powerpc/sstep: Add tests for prefixed integer load/stores") Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724004109.1461709-1-mpe@ellerman.id.au --- arch/powerpc/lib/test_emulate_step.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index 081b05480c47..d242e9f72e0c 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -8,6 +8,7 @@ #define pr_fmt(fmt) "emulate_step_test: " fmt #include +#include #include #include #include -- cgit v1.2.3 From 826b07b190c8ca69ce674f13b4dc9be2bc536fcd Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 24 Jul 2020 20:42:54 +1000 Subject: powerpc/sstep: Fix incorrect CONFIG symbol in scv handling When I "fixed" the ppc64e build in Nick's recent patch, I typoed the CONFIG symbol, resulting in one that doesn't exist. Fix it to use the correct symbol. 
Reported-by: Christophe Leroy Fixes: 7fa95f9adaee ("powerpc/64s: system call support for scv/rfscv instructions") Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724131609.1640533-1-mpe@ellerman.id.au --- arch/powerpc/lib/sstep.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 4194119eff82..c58ea9e787cb 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -3382,7 +3382,7 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr) regs->msr = MSR_KERNEL; return 1; -#ifdef CONFIG_PPC64_BOOK3S +#ifdef CONFIG_PPC_BOOK3S_64 case SYSCALL_VECTORED_0: /* scv 0 */ regs->gpr[9] = regs->gpr[13]; regs->gpr[10] = MSR_KERNEL; -- cgit v1.2.3 From 269e829f48a0d0d27667abe25ca5c9e5b6ab08e2 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Sun, 26 Jul 2020 18:55:17 +0530 Subject: powerpc/book3s64/pkey: Disable pkey on POWER6 and before POWER6 only supports AMR update via privileged mode (MSR[PR] = 0, SPRN_AMR=29) The PR=1 (userspace) alias for that SPR (SPRN_AMR=13) was only supported from POWER7. Since we don't allow userspace modifying of AMR value we should disable pkey support on P6 and before. The hypervisor will still report pkey support via "ibm,processor-storage-keys". Hence also check for P7 CPU_FTR bit to decide on pkey support. Fixes: f491fe3fb41e ("powerpc/book3s64/pkeys: Simplify the key initialization") Reported-by: Michael Ellerman Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200726132517.399076-1-aneesh.kumar@linux.ibm.com --- arch/powerpc/mm/book3s64/pkeys.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index 792b36aa9619..69a6b87f2bb4 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -73,6 +73,12 @@ static int scan_pkey_feature(void) if (early_radix_enabled()) return 0; + /* + * Only P7 and above supports SPRN_AMR update with MSR[PR] = 1 + */ + if (!early_cpu_has_feature(CPU_FTR_ARCH_206)) + return 0; + ret = of_scan_flat_dt(dt_scan_storage_keys, &pkeys_total); if (ret == 0) { /* -- cgit v1.2.3 From 3190ecbfeeb2ab17778887ce3fa964615d6460fd Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 23 Jul 2020 14:38:04 +0530 Subject: powerpc/watchpoint: Fix 512 byte boundary limit Milton Miller reported that we are aligning start and end address to wrong size SZ_512M. It should be SZ_512. Fix that. While doing this change I also found a case where ALIGN() comparison fails. Within a given aligned range, ALIGN() of two addresses does not match when start address is pointing to the first byte and end address is pointing to any other byte except the first one. But that's not true for ALIGN_DOWN(). ALIGN_DOWN() of any two addresses within that range will always point to the first byte. So use ALIGN_DOWN() instead of ALIGN(). 
Fixes: e68ef121c1f4 ("powerpc/watchpoint: Use builtin ALIGN*() macros") Reported-by: Milton Miller Signed-off-by: Ravi Bangoria Tested-by: Jordan Niethe Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200723090813.303838-2-ravi.bangoria@linux.ibm.com --- arch/powerpc/kernel/hw_breakpoint.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 0000daf0e1da..031e6defc08e 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -419,7 +419,7 @@ static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw) if (dawr_enabled()) { max_len = DAWR_MAX_LEN; /* DAWR region can't cross 512 bytes boundary */ - if (ALIGN(start_addr, SZ_512M) != ALIGN(end_addr - 1, SZ_512M)) + if (ALIGN_DOWN(start_addr, SZ_512) != ALIGN_DOWN(end_addr - 1, SZ_512)) return -EINVAL; } else if (IS_ENABLED(CONFIG_PPC_8xx)) { /* 8xx can setup a range without limitation */ -- cgit v1.2.3 From f6780ce619f8daa285760302d56e95892087bd1f Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 23 Jul 2020 14:38:05 +0530 Subject: powerpc/watchpoint: Fix DAWR exception constraint Pedro Miraglia Franco de Carvalho noticed that on p8/p9, DAR value is inconsistent with different type of load/store. Like for byte,word etc. load/stores, DAR is set to the address of the first byte of overlap between watch range and real access. But for quadword load/ store it's sometime set to the address of the first byte of real access whereas sometime set to the address of the first byte of overlap. This issue has been fixed in p10. In p10(ISA 3.1), DAR is always set to the address of the first byte of overlap. Commit 27985b2a640e ("powerpc/watchpoint: Don't ignore extraneous exceptions blindly") wrongly assumes that DAR is set to the address of the first byte of overlap for all load/stores on p8/p9 as well. Fix that. With the fix, we now rely on 'ea' provided by analyse_instr(). If analyse_instr() fails, generate event unconditionally on p8/p9, and on p10 generate event only if DAR is within a DAWR range. Note: 8xx is not affected. 
Fixes: 27985b2a640e ("powerpc/watchpoint: Don't ignore extraneous exceptions blindly") Fixes: 74c6881019b7 ("powerpc/watchpoint: Prepare handler to handle more than one watchpoint") Reported-by: Pedro Miraglia Franco de Carvalho Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200723090813.303838-3-ravi.bangoria@linux.ibm.com --- arch/powerpc/kernel/hw_breakpoint.c | 72 +++++++++++++++++++++---------------- 1 file changed, 41 insertions(+), 31 deletions(-) diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 031e6defc08e..a971e22aea81 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -498,11 +498,11 @@ static bool dar_in_user_range(unsigned long dar, struct arch_hw_breakpoint *info return ((info->address <= dar) && (dar - info->address < info->len)); } -static bool dar_user_range_overlaps(unsigned long dar, int size, - struct arch_hw_breakpoint *info) +static bool ea_user_range_overlaps(unsigned long ea, int size, + struct arch_hw_breakpoint *info) { - return ((dar < info->address + info->len) && - (dar + size > info->address)); + return ((ea < info->address + info->len) && + (ea + size > info->address)); } static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info) @@ -515,20 +515,22 @@ static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info) return ((hw_start_addr <= dar) && (hw_end_addr > dar)); } -static bool dar_hw_range_overlaps(unsigned long dar, int size, - struct arch_hw_breakpoint *info) +static bool ea_hw_range_overlaps(unsigned long ea, int size, + struct arch_hw_breakpoint *info) { unsigned long hw_start_addr, hw_end_addr; hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE); hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE); - return ((dar < hw_end_addr) && (dar + size > hw_start_addr)); + return ((ea < hw_end_addr) && (ea + size > hw_start_addr)); } /* * If hw has multiple DAWR registers, we also need to check all * dawrx constraint bits to confirm this is _really_ a valid event. + * If type is UNKNOWN, but privilege level matches, consider it as + * a positive match. */ static bool check_dawrx_constraints(struct pt_regs *regs, int type, struct arch_hw_breakpoint *info) @@ -553,7 +555,8 @@ static bool check_dawrx_constraints(struct pt_regs *regs, int type, * including extraneous exception. Otherwise return false. 
*/ static bool check_constraints(struct pt_regs *regs, struct ppc_inst instr, - int type, int size, struct arch_hw_breakpoint *info) + unsigned long ea, int type, int size, + struct arch_hw_breakpoint *info) { bool in_user_range = dar_in_user_range(regs->dar, info); bool dawrx_constraints; @@ -569,22 +572,27 @@ static bool check_constraints(struct pt_regs *regs, struct ppc_inst instr, } if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) { - if (in_user_range) - return true; + if (cpu_has_feature(CPU_FTR_ARCH_31) && + !dar_in_hw_range(regs->dar, info)) + return false; - if (dar_in_hw_range(regs->dar, info)) { - info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; - return true; - } - return false; + return true; } dawrx_constraints = check_dawrx_constraints(regs, type, info); - if (dar_user_range_overlaps(regs->dar, size, info)) + if (type == UNKNOWN) { + if (cpu_has_feature(CPU_FTR_ARCH_31) && + !dar_in_hw_range(regs->dar, info)) + return false; + return dawrx_constraints; + } - if (dar_hw_range_overlaps(regs->dar, size, info)) { + if (ea_user_range_overlaps(ea, size, info)) + return dawrx_constraints; + + if (ea_hw_range_overlaps(ea, size, info)) { if (dawrx_constraints) { info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; return true; @@ -594,7 +602,7 @@ static bool check_constraints(struct pt_regs *regs, struct ppc_inst instr, } static void get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr, - int *type, int *size, bool *larx_stcx) + int *type, int *size, unsigned long *ea) { struct instruction_op op; @@ -602,16 +610,18 @@ static void get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr, return; analyse_instr(&op, regs, *instr); - - /* - * Set size = 8 if analyse_instr() fails. If it's a userspace - * watchpoint(valid or extraneous), we can notify user about it. - * If it's a kernel watchpoint, instruction emulation will fail - * in stepping_handler() and watchpoint will be disabled. - */ *type = GETTYPE(op.type); - *size = !(*type == UNKNOWN) ? 
GETSIZE(op.type) : 8; - *larx_stcx = (*type == LARX || *type == STCX); + *ea = op.ea; +#ifdef __powerpc64__ + if (!(regs->msr & MSR_64BIT)) + *ea &= 0xffffffffUL; +#endif + *size = GETSIZE(op.type); +} + +static bool is_larx_stcx_instr(int type) +{ + return type == LARX || type == STCX; } /* @@ -678,7 +688,7 @@ int hw_breakpoint_handler(struct die_args *args) struct ppc_inst instr = ppc_inst(0); int type = 0; int size = 0; - bool larx_stcx = false; + unsigned long ea; /* Disable breakpoints during exception handling */ hw_breakpoint_disable(); @@ -692,7 +702,7 @@ int hw_breakpoint_handler(struct die_args *args) rcu_read_lock(); if (!IS_ENABLED(CONFIG_PPC_8xx)) - get_instr_detail(regs, &instr, &type, &size, &larx_stcx); + get_instr_detail(regs, &instr, &type, &size, &ea); for (i = 0; i < nr_wp_slots(); i++) { bp[i] = __this_cpu_read(bp_per_reg[i]); @@ -702,7 +712,7 @@ int hw_breakpoint_handler(struct die_args *args) info[i] = counter_arch_bp(bp[i]); info[i]->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ; - if (check_constraints(regs, instr, type, size, info[i])) { + if (check_constraints(regs, instr, ea, type, size, info[i])) { if (!IS_ENABLED(CONFIG_PPC_8xx) && ppc_inst_equal(instr, ppc_inst(0))) { handler_error(bp[i], info[i]); @@ -744,7 +754,7 @@ int hw_breakpoint_handler(struct die_args *args) } if (!IS_ENABLED(CONFIG_PPC_8xx)) { - if (larx_stcx) { + if (is_larx_stcx_instr(type)) { for (i = 0; i < nr_wp_slots(); i++) { if (!hit[i]) continue; -- cgit v1.2.3 From f3c832f1350bcf1e6906113ee3168066f4235dbe Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 23 Jul 2020 14:38:06 +0530 Subject: powerpc/watchpoint: Fix DAWR exception for CACHEOP 'ea' returned by analyse_instr() needs to be aligned down to cache block size for CACHEOP instructions. analyse_instr() does not set size for CACHEOP, thus size also needs to be calculated manually. Fixes: 27985b2a640e ("powerpc/watchpoint: Don't ignore extraneous exceptions blindly") Fixes: 74c6881019b7 ("powerpc/watchpoint: Prepare handler to handle more than one watchpoint") Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200723090813.303838-4-ravi.bangoria@linux.ibm.com --- arch/powerpc/kernel/hw_breakpoint.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index a971e22aea81..c55e67bab271 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -538,7 +538,12 @@ static bool check_dawrx_constraints(struct pt_regs *regs, int type, if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ)) return false; - if (OP_IS_STORE(type) && !(info->type & HW_BRK_TYPE_WRITE)) + /* + * The Cache Management instructions other than dcbz never + * cause a match. i.e. if type is CACHEOP, the instruction + * is dcbz, and dcbz is treated as Store. 
+ */ + if ((OP_IS_STORE(type) || type == CACHEOP) && !(info->type & HW_BRK_TYPE_WRITE)) return false; if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL)) @@ -601,6 +606,15 @@ static bool check_constraints(struct pt_regs *regs, struct ppc_inst instr, return false; } +static int cache_op_size(void) +{ +#ifdef __powerpc64__ + return ppc64_caches.l1d.block_size; +#else + return L1_CACHE_BYTES; +#endif +} + static void get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr, int *type, int *size, unsigned long *ea) { @@ -616,7 +630,12 @@ static void get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr, if (!(regs->msr & MSR_64BIT)) *ea &= 0xffffffffUL; #endif + *size = GETSIZE(op.type); + if (*type == CACHEOP) { + *size = cache_op_size(); + *ea &= ~(*size - 1); + } } static bool is_larx_stcx_instr(int type) -- cgit v1.2.3 From 8f460a8175e6d85537d581734e9fa7ef97036b1a Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 23 Jul 2020 14:38:07 +0530 Subject: powerpc/watchpoint: Enable watchpoint functionality on power10 guest CPU_FTR_DAWR is by default enabled for host via CPU_FTRS_DT_CPU_BASE (controlled by CONFIG_PPC_DT_CPU_FTRS). But cpu-features device-tree node is not PAPR compatible and thus not yet used by kvm or pHyp guests. Enable watchpoint functionality on power10 guest (both kvm and powervm) by adding CPU_FTR_DAWR to CPU_FTRS_POWER10. Note that this change does not enable 2nd DAWR support. Signed-off-by: Ravi Bangoria Tested-by: Jordan Niethe Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200723090813.303838-5-ravi.bangoria@linux.ibm.com --- arch/powerpc/include/asm/cputable.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index a461c3300804..a0453fca2dff 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -477,7 +477,8 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ - CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_ARCH_31) + CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_ARCH_31 | \ + CPU_FTR_DAWR) #define CPU_FTRS_CELL (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ -- cgit v1.2.3 From dc1cedca54704d336c333b5398daaf13b23e391b Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 23 Jul 2020 14:38:08 +0530 Subject: powerpc/dt_cpu_ftrs: Add feature for 2nd DAWR Add new device-tree feature for 2nd DAWR. If this feature is present, 2nd DAWR is supported, otherwise not. 
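For context, a sketch of the consumer side: once firmware advertises the node and CPU_FTR_DAWR1 gets set (see the dt_cpu_ftrs.c hunk below), a later patch in this series sizes the watchpoint slots off the same bit, roughly as:

static inline int nr_wp_slots(void)
{
	/* the second DAWR is only usable when firmware advertised CPU_FTR_DAWR1 */
	return cpu_has_feature(CPU_FTR_DAWR1) ? 2 : 1;
}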
Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200723090813.303838-6-ravi.bangoria@linux.ibm.com --- arch/powerpc/include/asm/cputable.h | 3 ++- arch/powerpc/kernel/dt_cpu_ftrs.c | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index a0453fca2dff..d36db35e3aac 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -214,6 +214,7 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTR_P9_TLBIE_ERAT_BUG LONG_ASM_CONST(0x0001000000000000) #define CPU_FTR_P9_RADIX_PREFETCH_BUG LONG_ASM_CONST(0x0002000000000000) #define CPU_FTR_ARCH_31 LONG_ASM_CONST(0x0004000000000000) +#define CPU_FTR_DAWR1 LONG_ASM_CONST(0x0008000000000000) #ifndef __ASSEMBLY__ @@ -478,7 +479,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_ARCH_31 | \ - CPU_FTR_DAWR) + CPU_FTR_DAWR | CPU_FTR_DAWR1) #define CPU_FTRS_CELL (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 22ba7fba1147..6f8c0c6b937a 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -686,6 +686,7 @@ static struct dt_cpu_feature_match __initdata {"wait-v3", feat_enable, 0}, {"prefix-instructions", feat_enable, 0}, {"matrix-multiply-assist", feat_enable_mma, 0}, + {"debug-facilities-v31", feat_enable, CPU_FTR_DAWR1}, }; static bool __initdata using_dt_cpu_ftrs; -- cgit v1.2.3 From 8f45ca3f8b87c4810674fbfe65de6d041ee0baee Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 23 Jul 2020 14:38:09 +0530 Subject: powerpc/watchpoint: Set CPU_FTR_DAWR1 based on pa-features bit As per the PAPR, bit 0 of byte 64 in pa-features property indicates availability of 2nd DAWR registers. i.e. If this bit is set, 2nd DAWR is present, otherwise not. Host generally uses "cpu-features", which masks "pa-features". But "cpu-features" are still not used for guests and thus this change is mostly applicable for guests only. Signed-off-by: Ravi Bangoria Tested-by: Jordan Niethe Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200723090813.303838-7-ravi.bangoria@linux.ibm.com --- arch/powerpc/kernel/prom.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index dfe0103ad131..5a05c5e993fd 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -175,6 +175,8 @@ static struct ibm_pa_feature { */ { .pabyte = 22, .pabit = 0, .cpu_features = CPU_FTR_TM_COMP, .cpu_user_ftrs2 = PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_HTM_NOSC_COMP }, + + { .pabyte = 64, .pabit = 0, .cpu_features = CPU_FTR_DAWR1 }, }; static void __init scan_features(unsigned long node, const unsigned char *ftrs, -- cgit v1.2.3 From 6f3fe297f95134e9b2386dae0067bf530e1ddca0 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 23 Jul 2020 14:38:10 +0530 Subject: powerpc/watchpoint: Rename current H_SET_MODE DAWR macro Current H_SET_MODE hcall macro name for setting/resetting DAWR0 is H_SET_MODE_RESOURCE_SET_DAWR. Add suffix 0 to macro name as well. 
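As a hedged illustration of what the renamed constant selects: H_SET_MODE takes a resource argument, and the pseries wrapper plpar_set_watchpoint0() (renamed in the hunk below) passes resource 2 together with the DAWR0/DAWRX0 pair. The caller name set_guest_dawr0() is made up for this example.

/* Illustrative caller: program DAWR0 through the hypervisor and report failure */
static int set_guest_dawr0(unsigned long dawr0, unsigned long dawrx0)
{
	long rc = plpar_set_watchpoint0(dawr0, dawrx0);

	if (rc != H_SUCCESS)
		pr_err("H_SET_MODE(SET_DAWR0) failed: %ld\n", rc);

	return rc == H_SUCCESS ? 0 : -EIO;
}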
Signed-off-by: Ravi Bangoria Reviewed-by: Jordan Niethe Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200723090813.303838-8-ravi.bangoria@linux.ibm.com --- arch/powerpc/include/asm/hvcall.h | 2 +- arch/powerpc/include/asm/plpar_wrappers.h | 2 +- arch/powerpc/kvm/book3s_hv.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 8d7ee3b533c4..9ab367030f4f 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -355,7 +355,7 @@ /* Values for 2nd argument to H_SET_MODE */ #define H_SET_MODE_RESOURCE_SET_CIABR 1 -#define H_SET_MODE_RESOURCE_SET_DAWR 2 +#define H_SET_MODE_RESOURCE_SET_DAWR0 2 #define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3 #define H_SET_MODE_RESOURCE_LE 4 diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 4293c5d2ddf4..d12c3680d946 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -312,7 +312,7 @@ static inline long plpar_set_ciabr(unsigned long ciabr) static inline long plpar_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0) { - return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR, dawr0, dawrx0); + return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR0, dawr0, dawrx0); } static inline long plpar_signal_sys_reset(long cpu) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 0a0df31987f7..a22ac01fa660 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -764,7 +764,7 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags, return H_P3; vcpu->arch.ciabr = value1; return H_SUCCESS; - case H_SET_MODE_RESOURCE_SET_DAWR: + case H_SET_MODE_RESOURCE_SET_DAWR0: if (!kvmppc_power8_compatible(vcpu)) return H_P2; if (!ppc_breakpoint_available()) -- cgit v1.2.3 From 03f3e54abd95061ea11bdb4eedbe3cab6553704f Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 23 Jul 2020 14:38:11 +0530 Subject: powerpc/watchpoint: Guest support for 2nd DAWR hcall 2nd DAWR can be set/unset using H_SET_MODE hcall with resource value 5. Enable powervm guest support with that. This has no effect on kvm guest because kvm will return error if guest does hcall with resource value 5. 
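Putting the hunks below together, the pseries hook ends up looking roughly like this (a consolidated view of the patched function, shown here only for readability):

static int pseries_set_dawr(int nr, unsigned long dawr, unsigned long dawrx)
{
	/* PAPR says the OS can't set the HYP bit */
	dawrx &= ~DAWRX_HYP;

	/* resource 2 programs DAWR0, resource 5 programs DAWR1 */
	if (nr == 0)
		return plpar_set_watchpoint0(dawr, dawrx);
	return plpar_set_watchpoint1(dawr, dawrx);
}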
Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200723090813.303838-9-ravi.bangoria@linux.ibm.com --- arch/powerpc/include/asm/hvcall.h | 1 + arch/powerpc/include/asm/machdep.h | 2 +- arch/powerpc/include/asm/plpar_wrappers.h | 5 +++++ arch/powerpc/kernel/dawr.c | 2 +- arch/powerpc/platforms/pseries/setup.c | 7 +++++-- 5 files changed, 13 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 9ab367030f4f..fbb377055471 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -358,6 +358,7 @@ #define H_SET_MODE_RESOURCE_SET_DAWR0 2 #define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3 #define H_SET_MODE_RESOURCE_LE 4 +#define H_SET_MODE_RESOURCE_SET_DAWR1 5 /* Values for argument to H_SIGNAL_SYS_RESET */ #define H_SIGNAL_SYS_RESET_ALL -1 diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 7bcb64444a39..a90b892f0bfe 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -131,7 +131,7 @@ struct machdep_calls { unsigned long dabrx); /* Set DAWR for this platform, leave empty for default implementation */ - int (*set_dawr)(unsigned long dawr, + int (*set_dawr)(int nr, unsigned long dawr, unsigned long dawrx); #ifdef CONFIG_PPC32 /* XXX for now */ diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index d12c3680d946..ece84a430701 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -315,6 +315,11 @@ static inline long plpar_set_watchpoint0(unsigned long dawr0, unsigned long dawr return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR0, dawr0, dawrx0); } +static inline long plpar_set_watchpoint1(unsigned long dawr1, unsigned long dawrx1) +{ + return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR1, dawr1, dawrx1); +} + static inline long plpar_signal_sys_reset(long cpu) { return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu); diff --git a/arch/powerpc/kernel/dawr.c b/arch/powerpc/kernel/dawr.c index 500f52fa4711..cdc2dccb987d 100644 --- a/arch/powerpc/kernel/dawr.c +++ b/arch/powerpc/kernel/dawr.c @@ -37,7 +37,7 @@ int set_dawr(int nr, struct arch_hw_breakpoint *brk) dawrx |= (mrd & 0x3f) << (63 - 53); if (ppc_md.set_dawr) - return ppc_md.set_dawr(dawr, dawrx); + return ppc_md.set_dawr(nr, dawr, dawrx); if (nr == 0) { mtspr(SPRN_DAWR0, dawr); diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 8c85466e0dd8..ba34eb23e8f5 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -833,12 +833,15 @@ static int pseries_set_xdabr(unsigned long dabr, unsigned long dabrx) return plpar_hcall_norets(H_SET_XDABR, dabr, dabrx); } -static int pseries_set_dawr(unsigned long dawr, unsigned long dawrx) +static int pseries_set_dawr(int nr, unsigned long dawr, unsigned long dawrx) { /* PAPR says we can't set HYP */ dawrx &= ~DAWRX_HYP; - return plpar_set_watchpoint0(dawr, dawrx); + if (nr == 0) + return plpar_set_watchpoint0(dawr, dawrx); + else + return plpar_set_watchpoint1(dawr, dawrx); } #define CMO_CHARACTERISTICS_TOKEN 44 -- cgit v1.2.3 From deb2bd9bcc8428d4b65b6ba640ba8b57c1b20b17 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 23 Jul 2020 14:38:12 +0530 Subject: powerpc/watchpoint: Return available watchpoints dynamically So far Book3S Powerpc supported only one watchpoint. Power10 is introducing 2nd DAWR. 
Enable 2nd DAWR support for Power10. Availability of 2nd DAWR will depend on CPU_FTR_DAWR1. Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200723090813.303838-10-ravi.bangoria@linux.ibm.com --- arch/powerpc/include/asm/cputable.h | 5 +++-- arch/powerpc/include/asm/hw_breakpoint.h | 4 +++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index d36db35e3aac..fdddb822d564 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -629,9 +629,10 @@ enum { /* * Maximum number of hw breakpoint supported on powerpc. Number of - * breakpoints supported by actual hw might be less than this. + * breakpoints supported by actual hw might be less than this, which + * is decided at run time in nr_wp_slots(). */ -#define HBP_NUM_MAX 1 +#define HBP_NUM_MAX 2 #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index cb424799da0d..c89250b6ac34 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -9,6 +9,8 @@ #ifndef _PPC_BOOK3S_64_HW_BREAKPOINT_H #define _PPC_BOOK3S_64_HW_BREAKPOINT_H +#include + #ifdef __KERNEL__ struct arch_hw_breakpoint { unsigned long address; @@ -46,7 +48,7 @@ struct arch_hw_breakpoint { static inline int nr_wp_slots(void) { - return HBP_NUM_MAX; + return cpu_has_feature(CPU_FTR_DAWR1) ? 2 : 1; } #ifdef CONFIG_HAVE_HW_BREAKPOINT -- cgit v1.2.3 From 3f31e49dc4588d396023028791e36c23235e1334 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 23 Jul 2020 14:38:13 +0530 Subject: powerpc/watchpoint: Remove 512 byte boundary Power10 has removed 512 bytes boundary from match criteria i.e. the watch range can cross 512 bytes boundary. Note: ISA 3.1 Book III 9.4 match criteria includes 512 byte limit but that is a documentation mistake and hopefully will be fixed in the next version of ISA. Though, ISA 3.1 change log mentions about removal of 512B boundary: Multiple DEAW: Added a second Data Address Watchpoint. [H]DAR is set to the first byte of overlap. 512B boundary is removed. Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200723090813.303838-11-ravi.bangoria@linux.ibm.com --- arch/powerpc/kernel/hw_breakpoint.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index c55e67bab271..1f4a1efa0074 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -418,8 +418,9 @@ static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw) if (dawr_enabled()) { max_len = DAWR_MAX_LEN; - /* DAWR region can't cross 512 bytes boundary */ - if (ALIGN_DOWN(start_addr, SZ_512) != ALIGN_DOWN(end_addr - 1, SZ_512)) + /* DAWR region can't cross 512 bytes boundary on p10 predecessors */ + if (!cpu_has_feature(CPU_FTR_ARCH_31) && + (ALIGN_DOWN(start_addr, SZ_512) != ALIGN_DOWN(end_addr - 1, SZ_512))) return -EINVAL; } else if (IS_ENABLED(CONFIG_PPC_8xx)) { /* 8xx can setup a range without limitation */ -- cgit v1.2.3 From 475028efc708880e16e61cc4cbbc00af784cb39b Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Sat, 25 Jul 2020 18:12:18 +1000 Subject: powerpc/eeh: Remove eeh_dev_phb_init_dynamic() This function is a one line wrapper around eeh_phb_pe_create() and despite the name it doesn't create any eeh_dev structures. 
Replace it with direct calls to eeh_phb_pe_create() since that does what it says on the tin and removes a layer of indirection. Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200725081231.39076-1-oohall@gmail.com --- arch/powerpc/include/asm/eeh.h | 2 +- arch/powerpc/kernel/eeh.c | 2 +- arch/powerpc/kernel/eeh_dev.c | 13 ------------- arch/powerpc/kernel/of_platform.c | 4 ++-- arch/powerpc/platforms/pseries/pci_dlpar.c | 2 +- 5 files changed, 5 insertions(+), 18 deletions(-) diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 964a54292b36..1a19b1bb74c0 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -294,7 +294,6 @@ const char *eeh_pe_loc_get(struct eeh_pe *pe); struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe); struct eeh_dev *eeh_dev_init(struct pci_dn *pdn); -void eeh_dev_phb_init_dynamic(struct pci_controller *phb); void eeh_show_enabled(void); int __init eeh_ops_register(struct eeh_ops *ops); int __exit eeh_ops_unregister(const char *name); @@ -362,6 +361,7 @@ static inline void eeh_remove_device(struct pci_dev *dev) { } #define EEH_POSSIBLE_ERROR(val, type) (0) #define EEH_IO_ERROR_VALUE(size) (-1UL) +static inline int eeh_phb_pe_create(struct pci_controller *phb) { return 0; } #endif /* CONFIG_EEH */ #if defined(CONFIG_PPC_PSERIES) && defined(CONFIG_EEH) diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index d407981dec76..859f76020256 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -1096,7 +1096,7 @@ static int eeh_init(void) /* Initialize PHB PEs */ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) - eeh_dev_phb_init_dynamic(hose); + eeh_phb_pe_create(hose); eeh_addr_cache_init(); diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c index 7370185c7a05..8e159a12f10c 100644 --- a/arch/powerpc/kernel/eeh_dev.c +++ b/arch/powerpc/kernel/eeh_dev.c @@ -52,16 +52,3 @@ struct eeh_dev *eeh_dev_init(struct pci_dn *pdn) return edev; } - -/** - * eeh_dev_phb_init_dynamic - Create EEH devices for devices included in PHB - * @phb: PHB - * - * Scan the PHB OF node and its child association, then create the - * EEH devices accordingly - */ -void eeh_dev_phb_init_dynamic(struct pci_controller *phb) -{ - /* EEH PE for PHB */ - eeh_phb_pe_create(phb); -} diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c index 71a3f97dc988..f89376ff633e 100644 --- a/arch/powerpc/kernel/of_platform.c +++ b/arch/powerpc/kernel/of_platform.c @@ -62,8 +62,8 @@ static int of_pci_phb_probe(struct platform_device *dev) /* Init pci_dn data structures */ pci_devs_phb_init_dynamic(phb); - /* Create EEH PEs for the PHB */ - eeh_dev_phb_init_dynamic(phb); + /* Create EEH PE for the PHB */ + eeh_phb_pe_create(phb); /* Scan the bus */ pcibios_scan_phb(phb); diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index b3a38f5a6b68..f9ae17e8a0f4 100644 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c @@ -34,7 +34,7 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn) pci_devs_phb_init_dynamic(phb); /* Create EEH devices for the PHB */ - eeh_dev_phb_init_dynamic(phb); + eeh_phb_pe_create(phb); if (dn->child) pseries_eeh_init_edev_recursive(PCI_DN(dn)); -- cgit v1.2.3 From d74ee8e9d12e2071014ecec96a1ce2744f77639d Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Sat, 25 Jul 2020 18:12:19 +1000 
Subject: powerpc/eeh: Remove eeh_dev.c The only thing in this file is eeh_dev_init() which is allocates and initialises an eeh_dev based on a pci_dn. This is only ever called from pci_dn.c so move it into there and remove the file. Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200725081231.39076-2-oohall@gmail.com --- arch/powerpc/include/asm/eeh.h | 6 ----- arch/powerpc/kernel/Makefile | 2 +- arch/powerpc/kernel/eeh_dev.c | 54 ------------------------------------------ arch/powerpc/kernel/pci_dn.c | 20 ++++++++++++++++ 4 files changed, 21 insertions(+), 61 deletions(-) delete mode 100644 arch/powerpc/kernel/eeh_dev.c diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 1a19b1bb74c0..dd7dd55db7dc 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -293,7 +293,6 @@ void eeh_pe_restore_bars(struct eeh_pe *pe); const char *eeh_pe_loc_get(struct eeh_pe *pe); struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe); -struct eeh_dev *eeh_dev_init(struct pci_dn *pdn); void eeh_show_enabled(void); int __init eeh_ops_register(struct eeh_ops *ops); int __exit eeh_ops_unregister(const char *name); @@ -339,11 +338,6 @@ static inline bool eeh_enabled(void) static inline void eeh_show_enabled(void) { } -static inline void *eeh_dev_init(struct pci_dn *pdn, void *data) -{ - return NULL; -} - static inline void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { } static inline int eeh_check_failure(const volatile void __iomem *token) diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 244542ae2a91..c5211bdcf1b6 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -71,7 +71,7 @@ obj-$(CONFIG_PPC_RTAS_DAEMON) += rtasd.o obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o obj-$(CONFIG_RTAS_PROC) += rtas-proc.o obj-$(CONFIG_PPC_DT_CPU_FTRS) += dt_cpu_ftrs.o -obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \ +obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_cache.o \ eeh_driver.o eeh_event.o eeh_sysfs.o obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c deleted file mode 100644 index 8e159a12f10c..000000000000 --- a/arch/powerpc/kernel/eeh_dev.c +++ /dev/null @@ -1,54 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * The file intends to implement dynamic creation of EEH device, which will - * be bound with OF node and PCI device simutaneously. The EEH devices would - * be foundamental information for EEH core components to work proerly. Besides, - * We have to support multiple situations where dynamic creation of EEH device - * is required: - * - * 1) Before PCI emunation starts, we need create EEH devices according to the - * PCI sensitive OF nodes. - * 2) When PCI emunation is done, we need do the binding between PCI device and - * the associated EEH device. - * 3) DR (Dynamic Reconfiguration) would create PCI sensitive OF node. EEH device - * will be created while PCI sensitive OF node is detected from DR. - * 4) PCI hotplug needs redoing the binding between PCI device and EEH device. If - * PHB is newly inserted, we also need create EEH devices accordingly. - * - * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012. 
- */ - -#include -#include -#include -#include -#include -#include - -#include -#include - -/** - * eeh_dev_init - Create EEH device according to OF node - * @pdn: PCI device node - * - * It will create EEH device according to the given OF node. The function - * might be called by PCI emunation, DR, PHB hotplug. - */ -struct eeh_dev *eeh_dev_init(struct pci_dn *pdn) -{ - struct eeh_dev *edev; - - /* Allocate EEH device */ - edev = kzalloc(sizeof(*edev), GFP_KERNEL); - if (!edev) - return NULL; - - /* Associate EEH device with OF node */ - pdn->edev = edev; - edev->pdn = pdn; - edev->bdfn = (pdn->busno << 8) | pdn->devfn; - edev->controller = pdn->phb; - - return edev; -} diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index 4e654df55969..f790a8d06f50 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c @@ -124,6 +124,26 @@ struct pci_dn *pci_get_pdn(struct pci_dev *pdev) return NULL; } +#ifdef CONFIG_EEH +static struct eeh_dev *eeh_dev_init(struct pci_dn *pdn) +{ + struct eeh_dev *edev; + + /* Allocate EEH device */ + edev = kzalloc(sizeof(*edev), GFP_KERNEL); + if (!edev) + return NULL; + + /* Associate EEH device with OF node */ + pdn->edev = edev; + edev->pdn = pdn; + edev->bdfn = (pdn->busno << 8) | pdn->devfn; + edev->controller = pdn->phb; + + return edev; +} +#endif /* CONFIG_EEH */ + #ifdef CONFIG_PCI_IOV static struct pci_dn *add_one_sriov_vf_pdn(struct pci_dn *parent, int vf_index, -- cgit v1.2.3 From dffa91539e80355402c0716a91af17fc8ddd1abf Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Sat, 25 Jul 2020 18:12:20 +1000 Subject: powerpc/eeh: Move vf_index out of pci_dn and into eeh_dev Drivers that do not support the PCI error handling callbacks are handled by tearing down the device and re-probing them. If the device being removed is a virtual function then we need to know the VF index so it can be removed using the pci_iov_{add|remove}_virtfn() API. Currently this is handled by looking up the pci_dn, and using the vf_index that was stashed there when the pci_dn for the VF was created in pcibios_sriov_enable(). We would like to eliminate the use of pci_dn outside of pseries though so we need to provide the generic EEH code with some other way to find the vf_index. The easiest thing to do here is move the vf_index field out of pci_dn and into eeh_dev. Currently pci_dn and eeh_dev are allocated and initialized together so this is a fairly minimal change in preparation for splitting pci_dn and eeh_dev in the future. 
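A condensed sketch of the consumer (the eeh_driver.c hunks below): when a VF has to be torn down and re-probed during recovery, both the parent PF and the index now come straight from the eeh_dev. The helper name eeh_reprobe_vf() is invented for this illustration.

/* Illustrative only: tear down and re-create a VF using eeh_dev fields */
static void eeh_reprobe_vf(struct eeh_dev *edev)
{
	if (!IS_ENABLED(CONFIG_PCI_IOV) || !edev->physfn)
		return;

	pci_iov_remove_virtfn(edev->physfn, edev->vf_index);
	pci_iov_add_virtfn(edev->physfn, edev->vf_index);
}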
Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200725081231.39076-3-oohall@gmail.com --- arch/powerpc/include/asm/eeh.h | 3 +++ arch/powerpc/include/asm/pci-bridge.h | 1 - arch/powerpc/kernel/eeh_driver.c | 6 ++---- arch/powerpc/kernel/pci_dn.c | 7 ++++--- 4 files changed, 9 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index dd7dd55db7dc..2a935db72198 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -148,7 +148,10 @@ struct eeh_dev { struct pci_dn *pdn; /* Associated PCI device node */ struct pci_dev *pdev; /* Associated PCI device */ bool in_error; /* Error flag for edev */ + + /* VF specific properties */ struct pci_dev *physfn; /* Associated SRIOV PF */ + int vf_index; /* Index of this VF */ }; /* "fmt" must be a simple literal string */ diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index b92e81b256e5..d2a2a14e56f9 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -202,7 +202,6 @@ struct pci_dn { #define IODA_INVALID_PE 0xFFFFFFFF unsigned int pe_number; #ifdef CONFIG_PCI_IOV - int vf_index; /* VF index in the PF */ u16 vfs_expanded; /* number of VFs IOV BAR expanded */ u16 num_vfs; /* number of VFs enabled*/ unsigned int *pe_num_map; /* PE# for the first VF PE or array */ diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 7b048cee767c..b70b9273f45a 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -477,7 +477,7 @@ static void *eeh_add_virt_device(struct eeh_dev *edev) } #ifdef CONFIG_PCI_IOV - pci_iov_add_virtfn(edev->physfn, eeh_dev_to_pdn(edev)->vf_index); + pci_iov_add_virtfn(edev->physfn, edev->vf_index); #endif return NULL; } @@ -521,9 +521,7 @@ static void eeh_rmv_device(struct eeh_dev *edev, void *userdata) if (edev->physfn) { #ifdef CONFIG_PCI_IOV - struct pci_dn *pdn = eeh_dev_to_pdn(edev); - - pci_iov_remove_virtfn(edev->physfn, pdn->vf_index); + pci_iov_remove_virtfn(edev->physfn, edev->vf_index); edev->pdev = NULL; #endif if (rmv_data) diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index f790a8d06f50..bf11ac8427ac 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c @@ -146,7 +146,6 @@ static struct eeh_dev *eeh_dev_init(struct pci_dn *pdn) #ifdef CONFIG_PCI_IOV static struct pci_dn *add_one_sriov_vf_pdn(struct pci_dn *parent, - int vf_index, int busno, int devfn) { struct pci_dn *pdn; @@ -163,7 +162,6 @@ static struct pci_dn *add_one_sriov_vf_pdn(struct pci_dn *parent, pdn->parent = parent; pdn->busno = busno; pdn->devfn = devfn; - pdn->vf_index = vf_index; pdn->pe_number = IODA_INVALID_PE; INIT_LIST_HEAD(&pdn->child_list); INIT_LIST_HEAD(&pdn->list); @@ -194,7 +192,7 @@ struct pci_dn *add_sriov_vf_pdns(struct pci_dev *pdev) for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) { struct eeh_dev *edev __maybe_unused; - pdn = add_one_sriov_vf_pdn(parent, i, + pdn = add_one_sriov_vf_pdn(parent, pci_iov_virtfn_bus(pdev, i), pci_iov_virtfn_devfn(pdev, i)); if (!pdn) { @@ -207,7 +205,10 @@ struct pci_dn *add_sriov_vf_pdns(struct pci_dev *pdev) /* Create the EEH device for the VF */ edev = eeh_dev_init(pdn); BUG_ON(!edev); + + /* FIXME: these should probably be populated by the EEH probe */ edev->physfn = pdev; + edev->vf_index = i; #endif /* CONFIG_EEH */ } return pci_get_pdn(pdev); -- 
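To make the replacement concrete (it mirrors the eeh_pseries.c hunk below): the VF's PE number is now looked up on demand from the parent PF's map instead of being cached in the VF's pci_dn. The helper name pseries_vf_pe_num() is made up here.

/* Illustrative only: derive a VF's PE number from its PF */
static int pseries_vf_pe_num(struct eeh_dev *edev)
{
	struct pci_dn *physfn_pdn = pci_get_pdn(edev->physfn);

	/* pe_num_map[] on the PF is indexed by the VF's position under it */
	return physfn_pdn->pe_num_map[edev->vf_index];
}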
cgit v1.2.3 From c408ce9075b8e1533f30fd3a113b75fb745f722f Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Sat, 25 Jul 2020 18:12:21 +1000 Subject: powerpc/pseries: Stop using pdn->pe_number The pci_dn->pe_number field is mainly used to track the IODA PE number of a device on PowerNV. At some point it grew a user in the pseries SR-IOV support which muddies the waters a bit, so remove it. Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200725081231.39076-4-oohall@gmail.com --- arch/powerpc/platforms/pseries/eeh_pseries.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 47760fb515cc..a82057b58144 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -53,8 +53,6 @@ void pseries_pcibios_bus_add_device(struct pci_dev *pdev) dev_dbg(&pdev->dev, "EEH: Setting up device\n"); #ifdef CONFIG_PCI_IOV if (pdev->is_virtfn) { - struct pci_dn *physfn_pdn; - pdn->device_id = pdev->device; pdn->vendor_id = pdev->vendor; pdn->class_code = pdev->class; @@ -64,8 +62,6 @@ void pseries_pcibios_bus_add_device(struct pci_dev *pdev) * completion from platform. */ pdn->last_allow_rc = 0; - physfn_pdn = pci_get_pdn(pdev->physfn); - pdn->pe_number = physfn_pdn->pe_num_map[pdn->vf_index]; } #endif pseries_eeh_init_edev(pdn); @@ -878,8 +874,8 @@ int pseries_send_allow_unfreeze(struct pci_dn *pdn, static int pseries_call_allow_unfreeze(struct eeh_dev *edev) { + int cur_vfs = 0, rc = 0, vf_index, bus, devfn, vf_pe_num; struct pci_dn *pdn, *tmp, *parent, *physfn_pdn; - int cur_vfs = 0, rc = 0, vf_index, bus, devfn; u16 *vf_pe_array; vf_pe_array = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL); @@ -912,8 +908,10 @@ static int pseries_call_allow_unfreeze(struct eeh_dev *edev) } } else { pdn = pci_get_pdn(edev->pdev); - vf_pe_array[0] = cpu_to_be16(pdn->pe_number); physfn_pdn = pci_get_pdn(edev->physfn); + + vf_pe_num = physfn_pdn->pe_num_map[edev->vf_index]; + vf_pe_array[0] = cpu_to_be16(vf_pe_num); rc = pseries_send_allow_unfreeze(physfn_pdn, vf_pe_array, 1); pdn->last_allow_rc = rc; -- cgit v1.2.3 From a40db934312cb2a4bef16b3edc962bc8c7f6462f Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Sat, 25 Jul 2020 18:12:22 +1000 Subject: powerpc/eeh: Kill off eeh_ops->get_pe_addr() This is used in precisely one place which is in pseries specific platform code. There's no need to have the callback in eeh_ops since the platform chooses the EEH PE addresses anyway. The PowerNV implementation has always been a stub too so remove it. 
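For orientation, a trimmed sketch of what the now platform-internal pseries helper does (the full version, including the validity probe and the older ibm,get-config-addr-info fallback, is in the hunk below); pe_addr_from_pdn() is an invented name.

/* Illustrative, trimmed: translate a device config address into a PE address */
static int pe_addr_from_pdn(struct pci_dn *pdn)
{
	int config_addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
	unsigned long buid = pdn->phb->buid;
	int rets[3];

	/* option 0 of ibm,get-config-addr-info2 returns the PE config address */
	if (rtas_call(ibm_get_config_addr_info2, 4, 2, rets, config_addr,
		      BUID_HI(buid), BUID_LO(buid), 0))
		return 0;	/* zero means "no valid PE address" */

	return rets[0];
}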
Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200725081231.39076-5-oohall@gmail.com --- arch/powerpc/include/asm/eeh.h | 1 - arch/powerpc/platforms/powernv/eeh-powernv.c | 13 ------------- arch/powerpc/platforms/pseries/eeh_pseries.c | 22 +++++++++++----------- 3 files changed, 11 insertions(+), 25 deletions(-) diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 2a935db72198..676d499bda42 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -220,7 +220,6 @@ struct eeh_ops { int (*init)(void); struct eeh_dev *(*probe)(struct pci_dev *pdev); int (*set_option)(struct eeh_pe *pe, int option); - int (*get_pe_addr)(struct eeh_pe *pe); int (*get_state)(struct eeh_pe *pe, int *delay); int (*reset)(struct eeh_pe *pe, int option); int (*get_log)(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len); diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 79409e005fcd..bcd0515d8f79 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -535,18 +535,6 @@ static int pnv_eeh_set_option(struct eeh_pe *pe, int option) return 0; } -/** - * pnv_eeh_get_pe_addr - Retrieve PE address - * @pe: EEH PE - * - * Retrieve the PE address according to the given tranditional - * PCI BDF (Bus/Device/Function) address. - */ -static int pnv_eeh_get_pe_addr(struct eeh_pe *pe) -{ - return pe->addr; -} - static void pnv_eeh_get_phb_diag(struct eeh_pe *pe) { struct pnv_phb *phb = pe->phb->private_data; @@ -1670,7 +1658,6 @@ static struct eeh_ops pnv_eeh_ops = { .init = pnv_eeh_init, .probe = pnv_eeh_probe, .set_option = pnv_eeh_set_option, - .get_pe_addr = pnv_eeh_get_pe_addr, .get_state = pnv_eeh_get_state, .reset = pnv_eeh_reset, .get_log = pnv_eeh_get_log, diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index a82057b58144..4a8a851e64d1 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -33,6 +33,8 @@ #include #include +static int pseries_eeh_get_pe_addr(struct pci_dn *pdn); + /* RTAS tokens */ static int ibm_set_eeh_option; static int ibm_set_slot_reset; @@ -468,7 +470,7 @@ void pseries_eeh_init_edev(struct pci_dn *pdn) eeh_edev_dbg(edev, "EEH failed to enable on device (code %d)\n", ret); } else { /* Retrieve PE address */ - edev->pe_config_addr = eeh_ops->get_pe_addr(&pe); + edev->pe_config_addr = pseries_eeh_get_pe_addr(pdn); pe.addr = edev->pe_config_addr; /* Some older systems (Power4) allow the ibm,set-eeh-option @@ -598,8 +600,10 @@ static int pseries_eeh_set_option(struct eeh_pe *pe, int option) * It's notable that zero'ed return value means invalid PE config * address. */ -static int pseries_eeh_get_pe_addr(struct eeh_pe *pe) +static int pseries_eeh_get_pe_addr(struct pci_dn *pdn) { + int config_addr = rtas_config_addr(pdn->busno, pdn->devfn, 0); + unsigned long buid = pdn->phb->buid; int ret = 0; int rets[3]; @@ -610,18 +614,16 @@ static int pseries_eeh_get_pe_addr(struct eeh_pe *pe) * meaningless. 
*/ ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, - pe->config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid), 1); + config_addr, BUID_HI(buid), BUID_LO(buid), 1); if (ret || (rets[0] == 0)) return 0; /* Retrieve the associated PE config address */ ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, - pe->config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid), 0); + config_addr, BUID_HI(buid), BUID_LO(buid), 0); if (ret) { pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n", - __func__, pe->phb->global_number, pe->config_addr); + __func__, pdn->phb->global_number, config_addr); return 0; } @@ -630,11 +632,10 @@ static int pseries_eeh_get_pe_addr(struct eeh_pe *pe) if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) { ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets, - pe->config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid), 0); + config_addr, BUID_HI(buid), BUID_LO(buid), 0); if (ret) { pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n", - __func__, pe->phb->global_number, pe->config_addr); + __func__, pdn->phb->global_number, config_addr); return 0; } @@ -945,7 +946,6 @@ static struct eeh_ops pseries_eeh_ops = { .init = pseries_eeh_init, .probe = pseries_eeh_probe, .set_option = pseries_eeh_set_option, - .get_pe_addr = pseries_eeh_get_pe_addr, .get_state = pseries_eeh_get_state, .reset = pseries_eeh_reset, .get_log = pseries_eeh_get_log, -- cgit v1.2.3 From 21b43bd59c7838825b94eea288333affb53dd399 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Sat, 25 Jul 2020 18:12:23 +1000 Subject: powerpc/eeh: Remove VF config space restoration There's a bunch of strange things about this code. First up is that none of the fields being written to are functional for a VF. The SR-IOV specification lists then as "Reserved, but OS should preserve" so writing new values to them doesn't do anything and is clearly wrong from a correctness perspective. However, since VFs are designed to be managed by the OS there is an argument to be made that we should be saving and restoring some parts of config space. We already sort of do that by saving the first 64 bytes of config space in the eeh_dev (see eeh_dev->config_space[]). This is inadequate since it doesn't even consider saving and restoring the PCI capability structures. However, this is a problem with EEH in general and that needs to be fixed for non-VF devices too. There's no real reason to keep around this around so delete it. Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200725081231.39076-6-oohall@gmail.com --- arch/powerpc/include/asm/eeh.h | 1 - arch/powerpc/kernel/eeh.c | 59 ---------------------------- arch/powerpc/platforms/powernv/eeh-powernv.c | 20 +++------- arch/powerpc/platforms/pseries/eeh_pseries.c | 26 +----------- 4 files changed, 7 insertions(+), 99 deletions(-) diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 676d499bda42..091d1ed28798 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -314,7 +314,6 @@ int eeh_pe_reset(struct eeh_pe *pe, int option, bool include_passed); int eeh_pe_configure(struct eeh_pe *pe); int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func, unsigned long addr, unsigned long mask); -int eeh_restore_vf_config(struct pci_dn *pdn); /** * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure. 
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 859f76020256..a4df6f6de0bd 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -742,65 +742,6 @@ static void eeh_restore_dev_state(struct eeh_dev *edev, void *userdata) pci_restore_state(pdev); } -int eeh_restore_vf_config(struct pci_dn *pdn) -{ - struct eeh_dev *edev = pdn_to_eeh_dev(pdn); - u32 devctl, cmd, cap2, aer_capctl; - int old_mps; - - if (edev->pcie_cap) { - /* Restore MPS */ - old_mps = (ffs(pdn->mps) - 8) << 5; - eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL, - 2, &devctl); - devctl &= ~PCI_EXP_DEVCTL_PAYLOAD; - devctl |= old_mps; - eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL, - 2, devctl); - - /* Disable Completion Timeout if possible */ - eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCAP2, - 4, &cap2); - if (cap2 & PCI_EXP_DEVCAP2_COMP_TMOUT_DIS) { - eeh_ops->read_config(pdn, - edev->pcie_cap + PCI_EXP_DEVCTL2, - 4, &cap2); - cap2 |= PCI_EXP_DEVCTL2_COMP_TMOUT_DIS; - eeh_ops->write_config(pdn, - edev->pcie_cap + PCI_EXP_DEVCTL2, - 4, cap2); - } - } - - /* Enable SERR and parity checking */ - eeh_ops->read_config(pdn, PCI_COMMAND, 2, &cmd); - cmd |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); - eeh_ops->write_config(pdn, PCI_COMMAND, 2, cmd); - - /* Enable report various errors */ - if (edev->pcie_cap) { - eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL, - 2, &devctl); - devctl &= ~PCI_EXP_DEVCTL_CERE; - devctl |= (PCI_EXP_DEVCTL_NFERE | - PCI_EXP_DEVCTL_FERE | - PCI_EXP_DEVCTL_URRE); - eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL, - 2, devctl); - } - - /* Enable ECRC generation and check */ - if (edev->pcie_cap && edev->aer_cap) { - eeh_ops->read_config(pdn, edev->aer_cap + PCI_ERR_CAP, - 4, &aer_capctl); - aer_capctl |= (PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE); - eeh_ops->write_config(pdn, edev->aer_cap + PCI_ERR_CAP, - 4, aer_capctl); - } - - return 0; -} - /** * pcibios_set_pcie_reset_state - Set PCI-E reset state * @dev: pci device struct diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index bcd0515d8f79..8f3a7611efc1 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -1629,20 +1629,12 @@ static int pnv_eeh_restore_config(struct pci_dn *pdn) if (!edev) return -EEXIST; - /* - * We have to restore the PCI config space after reset since the - * firmware can't see SRIOV VFs. - * - * FIXME: The MPS, error routing rules, timeout setting are worthy - * to be exported by firmware in extendible way. 
- */ - if (edev->physfn) { - ret = eeh_restore_vf_config(pdn); - } else { - phb = pdn->phb->private_data; - ret = opal_pci_reinit(phb->opal_id, - OPAL_REINIT_PCI_DEV, config_addr); - } + if (edev->physfn) + return 0; + + phb = edev->controller->private_data; + ret = opal_pci_reinit(phb->opal_id, + OPAL_REINIT_PCI_DEV, edev->bdfn); if (ret) { pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n", diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 4a8a851e64d1..e32b49ead313 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -824,30 +824,6 @@ static int pseries_eeh_write_config(struct pci_dn *pdn, int where, int size, u32 return rtas_write_config(pdn, where, size, val); } -static int pseries_eeh_restore_config(struct pci_dn *pdn) -{ - struct eeh_dev *edev = pdn_to_eeh_dev(pdn); - s64 ret = 0; - - if (!edev) - return -EEXIST; - - /* - * FIXME: The MPS, error routing rules, timeout setting are worthy - * to be exported by firmware in extendible way. - */ - if (edev->physfn) - ret = eeh_restore_vf_config(pdn); - - if (ret) { - pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n", - __func__, edev->pe_config_addr, ret); - return -EIO; - } - - return ret; -} - #ifdef CONFIG_PCI_IOV int pseries_send_allow_unfreeze(struct pci_dn *pdn, u16 *vf_pe_array, int cur_vfs) @@ -954,7 +930,7 @@ static struct eeh_ops pseries_eeh_ops = { .read_config = pseries_eeh_read_config, .write_config = pseries_eeh_write_config, .next_error = NULL, - .restore_config = pseries_eeh_restore_config, + .restore_config = NULL, /* NB: configure_bridge() does this */ #ifdef CONFIG_PCI_IOV .notify_resume = pseries_notify_resume #endif -- cgit v1.2.3 From 0c2c76523c04ac184c7d7bbb8756f603375b7fc4 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Sat, 25 Jul 2020 18:12:24 +1000 Subject: powerpc/eeh: Pass eeh_dev to eeh_ops->restore_config() Mechanical conversion of the eeh_ops interfaces to use eeh_dev to reference a specific device rather than pci_dn. No functional changes. 
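The shape of the conversion, condensed: a platform restore_config hook now receives the eeh_dev and uses its own fields (bdfn, controller) directly, with no pci_dn lookup. The function name example_eeh_restore_config() is invented; the members come from struct eeh_dev as shown in the hunks below.

/* Illustrative only: skeleton of a post-conversion restore_config hook */
static int example_eeh_restore_config(struct eeh_dev *edev)
{
	if (!edev)
		return -EEXIST;

	/* PHB and bdfn travel with the eeh_dev, no pci_dn lookup needed */
	pr_debug("EEH: restore config for PHB#%x-bdfn#%04x\n",
		 edev->controller->global_number, edev->bdfn);
	return 0;
}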
Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200725081231.39076-7-oohall@gmail.com --- arch/powerpc/include/asm/eeh.h | 2 +- arch/powerpc/kernel/eeh.c | 5 ++--- arch/powerpc/kernel/eeh_pe.c | 6 ++---- arch/powerpc/platforms/powernv/eeh-powernv.c | 6 ++---- 4 files changed, 7 insertions(+), 12 deletions(-) diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 091d1ed28798..c23c8fd5229d 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -229,7 +229,7 @@ struct eeh_ops { int (*read_config)(struct pci_dn *pdn, int where, int size, u32 *val); int (*write_config)(struct pci_dn *pdn, int where, int size, u32 val); int (*next_error)(struct eeh_pe **pe); - int (*restore_config)(struct pci_dn *pdn); + int (*restore_config)(struct eeh_dev *edev); int (*notify_resume)(struct pci_dn *pdn); }; diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index a4df6f6de0bd..1cef0f4bb2d5 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -726,7 +726,6 @@ static void eeh_disable_and_save_dev_state(struct eeh_dev *edev, static void eeh_restore_dev_state(struct eeh_dev *edev, void *userdata) { - struct pci_dn *pdn = eeh_dev_to_pdn(edev); struct pci_dev *pdev = eeh_dev_to_pci_dev(edev); struct pci_dev *dev = userdata; @@ -734,8 +733,8 @@ static void eeh_restore_dev_state(struct eeh_dev *edev, void *userdata) return; /* Apply customization from firmware */ - if (pdn && eeh_ops->restore_config) - eeh_ops->restore_config(pdn); + if (eeh_ops->restore_config) + eeh_ops->restore_config(edev); /* The caller should restore state for the specified device */ if (pdev != dev) diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 177852e39a25..d71493f66917 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -843,16 +843,14 @@ static void eeh_restore_device_bars(struct eeh_dev *edev) */ static void eeh_restore_one_device_bars(struct eeh_dev *edev, void *flag) { - struct pci_dn *pdn = eeh_dev_to_pdn(edev); - /* Do special restore for bridges */ if (edev->mode & EEH_DEV_BRIDGE) eeh_restore_bridge_bars(edev); else eeh_restore_device_bars(edev); - if (eeh_ops->restore_config && pdn) - eeh_ops->restore_config(pdn); + if (eeh_ops->restore_config) + eeh_ops->restore_config(edev); } /** diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 8f3a7611efc1..a41e67f674e6 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -1619,12 +1619,10 @@ static int pnv_eeh_next_error(struct eeh_pe **pe) return ret; } -static int pnv_eeh_restore_config(struct pci_dn *pdn) +static int pnv_eeh_restore_config(struct eeh_dev *edev) { - struct eeh_dev *edev = pdn_to_eeh_dev(pdn); struct pnv_phb *phb; s64 ret = 0; - int config_addr = (pdn->busno << 8) | (pdn->devfn); if (!edev) return -EEXIST; @@ -1638,7 +1636,7 @@ static int pnv_eeh_restore_config(struct pci_dn *pdn) if (ret) { pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n", - __func__, config_addr, ret); + __func__, edev->bdfn, ret); return -EIO; } -- cgit v1.2.3 From 8225d543dc0170e5b61af8559af07ec4f26f0bd6 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Sat, 25 Jul 2020 18:12:25 +1000 Subject: powerpc/eeh: Pass eeh_dev to eeh_ops->resume_notify() Mechanical conversion of the eeh_ops interfaces to use eeh_dev to reference a specific device rather than pci_dn. No functional changes. 
Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200725081231.39076-8-oohall@gmail.com --- arch/powerpc/include/asm/eeh.h | 2 +- arch/powerpc/kernel/eeh_driver.c | 4 ++-- arch/powerpc/kernel/eeh_sysfs.c | 2 +- arch/powerpc/platforms/pseries/eeh_pseries.c | 4 +--- 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index c23c8fd5229d..73e6a73a3684 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -230,7 +230,7 @@ struct eeh_ops { int (*write_config)(struct pci_dn *pdn, int where, int size, u32 val); int (*next_error)(struct eeh_pe **pe); int (*restore_config)(struct eeh_dev *edev); - int (*notify_resume)(struct pci_dn *pdn); + int (*notify_resume)(struct eeh_dev *edev); }; extern int eeh_subsystem_flags; diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index b70b9273f45a..b84d3cb2532e 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -425,8 +425,8 @@ static enum pci_ers_result eeh_report_resume(struct eeh_dev *edev, pci_uevent_ers(edev->pdev, PCI_ERS_RESULT_RECOVERED); #ifdef CONFIG_PCI_IOV - if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev)) - eeh_ops->notify_resume(eeh_dev_to_pdn(edev)); + if (eeh_ops->notify_resume) + eeh_ops->notify_resume(edev); #endif return PCI_ERS_RESULT_NONE; } diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c index 4fb0f1e1017a..429620da73ba 100644 --- a/arch/powerpc/kernel/eeh_sysfs.c +++ b/arch/powerpc/kernel/eeh_sysfs.c @@ -99,7 +99,7 @@ static ssize_t eeh_notify_resume_store(struct device *dev, if (!edev || !edev->pe || !eeh_ops->notify_resume) return -ENODEV; - if (eeh_ops->notify_resume(pci_get_pdn(pdev))) + if (eeh_ops->notify_resume(edev)) return -EIO; return count; diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index e32b49ead313..57de8acad5f5 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -899,10 +899,8 @@ static int pseries_call_allow_unfreeze(struct eeh_dev *edev) return rc; } -static int pseries_notify_resume(struct pci_dn *pdn) +static int pseries_notify_resume(struct eeh_dev *edev) { - struct eeh_dev *edev = pdn_to_eeh_dev(pdn); - if (!edev) return -EEXIST; -- cgit v1.2.3 From 17d2a4870467bc8e8966304c08980571da943558 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Sat, 25 Jul 2020 18:12:26 +1000 Subject: powerpc/eeh: Pass eeh_dev to eeh_ops->{read|write}_config() Mechanical conversion of the eeh_ops interfaces to use eeh_dev to reference a specific device rather than pci_dn. No functional changes. 
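A minimal sketch of the new calling convention (matching the eeh.c hunks below), with an invented helper name eeh_read_dev_id():

/* Illustrative only: read the vendor/device ID of a possibly-frozen device */
static u32 eeh_read_dev_id(struct eeh_dev *edev)
{
	u32 id = 0;

	/* the platform backend resolves the device from the eeh_dev itself */
	eeh_ops->read_config(edev, PCI_VENDOR_ID, 4, &id);
	return id;
}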
Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200725081231.39076-9-oohall@gmail.com --- arch/powerpc/include/asm/eeh.h | 4 +-- arch/powerpc/kernel/eeh.c | 22 ++++++------- arch/powerpc/kernel/eeh_pe.c | 47 +++++++++++++--------------- arch/powerpc/platforms/powernv/eeh-powernv.c | 43 ++++++++++++++----------- arch/powerpc/platforms/pseries/eeh_pseries.c | 16 ++++++---- 5 files changed, 68 insertions(+), 64 deletions(-) diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 73e6a73a3684..f8ef27f75c37 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -226,8 +226,8 @@ struct eeh_ops { int (*configure_bridge)(struct eeh_pe *pe); int (*err_inject)(struct eeh_pe *pe, int type, int func, unsigned long addr, unsigned long mask); - int (*read_config)(struct pci_dn *pdn, int where, int size, u32 *val); - int (*write_config)(struct pci_dn *pdn, int where, int size, u32 val); + int (*read_config)(struct eeh_dev *edev, int where, int size, u32 *val); + int (*write_config)(struct eeh_dev *edev, int where, int size, u32 val); int (*next_error)(struct eeh_pe **pe); int (*restore_config)(struct eeh_dev *edev); int (*notify_resume)(struct eeh_dev *edev); diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 1cef0f4bb2d5..1a12c8bdf61e 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -185,21 +185,21 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) pdn->phb->global_number, pdn->busno, PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); - eeh_ops->read_config(pdn, PCI_VENDOR_ID, 4, &cfg); + eeh_ops->read_config(edev, PCI_VENDOR_ID, 4, &cfg); n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg); pr_warn("EEH: PCI device/vendor: %08x\n", cfg); - eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cfg); + eeh_ops->read_config(edev, PCI_COMMAND, 4, &cfg); n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg); pr_warn("EEH: PCI cmd/status register: %08x\n", cfg); /* Gather bridge-specific registers */ if (edev->mode & EEH_DEV_BRIDGE) { - eeh_ops->read_config(pdn, PCI_SEC_STATUS, 2, &cfg); + eeh_ops->read_config(edev, PCI_SEC_STATUS, 2, &cfg); n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg); pr_warn("EEH: Bridge secondary status: %04x\n", cfg); - eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &cfg); + eeh_ops->read_config(edev, PCI_BRIDGE_CONTROL, 2, &cfg); n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg); pr_warn("EEH: Bridge control: %04x\n", cfg); } @@ -207,11 +207,11 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) /* Dump out the PCI-X command and status regs */ cap = edev->pcix_cap; if (cap) { - eeh_ops->read_config(pdn, cap, 4, &cfg); + eeh_ops->read_config(edev, cap, 4, &cfg); n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg); pr_warn("EEH: PCI-X cmd: %08x\n", cfg); - eeh_ops->read_config(pdn, cap+4, 4, &cfg); + eeh_ops->read_config(edev, cap+4, 4, &cfg); n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg); pr_warn("EEH: PCI-X status: %08x\n", cfg); } @@ -223,7 +223,7 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) pr_warn("EEH: PCI-E capabilities and status follow:\n"); for (i=0; i<=8; i++) { - eeh_ops->read_config(pdn, cap+4*i, 4, &cfg); + eeh_ops->read_config(edev, cap+4*i, 4, &cfg); n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); if ((i % 4) == 0) { @@ -250,7 +250,7 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) pr_warn("EEH: PCI-E 
AER capability register set follows:\n"); for (i=0; i<=13; i++) { - eeh_ops->read_config(pdn, cap+4*i, 4, &cfg); + eeh_ops->read_config(edev, cap+4*i, 4, &cfg); n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); if ((i % 4) == 0) { @@ -917,15 +917,13 @@ int eeh_pe_reset_full(struct eeh_pe *pe, bool include_passed) */ void eeh_save_bars(struct eeh_dev *edev) { - struct pci_dn *pdn; int i; - pdn = eeh_dev_to_pdn(edev); - if (!pdn) + if (!edev) return; for (i = 0; i < 16; i++) - eeh_ops->read_config(pdn, i * 4, 4, &edev->config_space[i]); + eeh_ops->read_config(edev, i * 4, 4, &edev->config_space[i]); /* * For PCI bridges including root port, we need enable bus diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index d71493f66917..f20fb0ee6aec 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -698,7 +698,6 @@ void eeh_pe_state_clear(struct eeh_pe *root, int state, bool include_passed) */ static void eeh_bridge_check_link(struct eeh_dev *edev) { - struct pci_dn *pdn = eeh_dev_to_pdn(edev); int cap; uint32_t val; int timeout = 0; @@ -714,32 +713,32 @@ static void eeh_bridge_check_link(struct eeh_dev *edev) /* Check slot status */ cap = edev->pcie_cap; - eeh_ops->read_config(pdn, cap + PCI_EXP_SLTSTA, 2, &val); + eeh_ops->read_config(edev, cap + PCI_EXP_SLTSTA, 2, &val); if (!(val & PCI_EXP_SLTSTA_PDS)) { eeh_edev_dbg(edev, "No card in the slot (0x%04x) !\n", val); return; } /* Check power status if we have the capability */ - eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCAP, 2, &val); + eeh_ops->read_config(edev, cap + PCI_EXP_SLTCAP, 2, &val); if (val & PCI_EXP_SLTCAP_PCP) { - eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCTL, 2, &val); + eeh_ops->read_config(edev, cap + PCI_EXP_SLTCTL, 2, &val); if (val & PCI_EXP_SLTCTL_PCC) { eeh_edev_dbg(edev, "In power-off state, power it on ...\n"); val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC); val |= (0x0100 & PCI_EXP_SLTCTL_PIC); - eeh_ops->write_config(pdn, cap + PCI_EXP_SLTCTL, 2, val); + eeh_ops->write_config(edev, cap + PCI_EXP_SLTCTL, 2, val); msleep(2 * 1000); } } /* Enable link */ - eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCTL, 2, &val); + eeh_ops->read_config(edev, cap + PCI_EXP_LNKCTL, 2, &val); val &= ~PCI_EXP_LNKCTL_LD; - eeh_ops->write_config(pdn, cap + PCI_EXP_LNKCTL, 2, val); + eeh_ops->write_config(edev, cap + PCI_EXP_LNKCTL, 2, val); /* Check link */ - eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCAP, 4, &val); + eeh_ops->read_config(edev, cap + PCI_EXP_LNKCAP, 4, &val); if (!(val & PCI_EXP_LNKCAP_DLLLARC)) { eeh_edev_dbg(edev, "No link reporting capability (0x%08x) \n", val); msleep(1000); @@ -752,7 +751,7 @@ static void eeh_bridge_check_link(struct eeh_dev *edev) msleep(20); timeout += 20; - eeh_ops->read_config(pdn, cap + PCI_EXP_LNKSTA, 2, &val); + eeh_ops->read_config(edev, cap + PCI_EXP_LNKSTA, 2, &val); if (val & PCI_EXP_LNKSTA_DLLLA) break; } @@ -769,7 +768,6 @@ static void eeh_bridge_check_link(struct eeh_dev *edev) static void eeh_restore_bridge_bars(struct eeh_dev *edev) { - struct pci_dn *pdn = eeh_dev_to_pdn(edev); int i; /* @@ -777,20 +775,20 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev) * Bus numbers and windows: 0x18 - 0x30 */ for (i = 4; i < 13; i++) - eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]); + eeh_ops->write_config(edev, i*4, 4, edev->config_space[i]); /* Rom: 0x38 */ - eeh_ops->write_config(pdn, 14*4, 4, edev->config_space[14]); + eeh_ops->write_config(edev, 14*4, 4, edev->config_space[14]); /* Cache line & Latency timer: 0xC 0xD */ 
- eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1, + eeh_ops->write_config(edev, PCI_CACHE_LINE_SIZE, 1, SAVED_BYTE(PCI_CACHE_LINE_SIZE)); - eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1, - SAVED_BYTE(PCI_LATENCY_TIMER)); + eeh_ops->write_config(edev, PCI_LATENCY_TIMER, 1, + SAVED_BYTE(PCI_LATENCY_TIMER)); /* Max latency, min grant, interrupt ping and line: 0x3C */ - eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]); + eeh_ops->write_config(edev, 15*4, 4, edev->config_space[15]); /* PCI Command: 0x4 */ - eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] | + eeh_ops->write_config(edev, PCI_COMMAND, 4, edev->config_space[1] | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); /* Check the PCIe link is ready */ @@ -799,28 +797,27 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev) static void eeh_restore_device_bars(struct eeh_dev *edev) { - struct pci_dn *pdn = eeh_dev_to_pdn(edev); int i; u32 cmd; for (i = 4; i < 10; i++) - eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]); + eeh_ops->write_config(edev, i*4, 4, edev->config_space[i]); /* 12 == Expansion ROM Address */ - eeh_ops->write_config(pdn, 12*4, 4, edev->config_space[12]); + eeh_ops->write_config(edev, 12*4, 4, edev->config_space[12]); - eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1, + eeh_ops->write_config(edev, PCI_CACHE_LINE_SIZE, 1, SAVED_BYTE(PCI_CACHE_LINE_SIZE)); - eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1, + eeh_ops->write_config(edev, PCI_LATENCY_TIMER, 1, SAVED_BYTE(PCI_LATENCY_TIMER)); /* max latency, min grant, interrupt pin and line */ - eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]); + eeh_ops->write_config(edev, 15*4, 4, edev->config_space[15]); /* * Restore PERR & SERR bits, some devices require it, * don't touch the other command bits */ - eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cmd); + eeh_ops->read_config(edev, PCI_COMMAND, 4, &cmd); if (edev->config_space[1] & PCI_COMMAND_PARITY) cmd |= PCI_COMMAND_PARITY; else @@ -829,7 +826,7 @@ static void eeh_restore_device_bars(struct eeh_dev *edev) cmd |= PCI_COMMAND_SERR; else cmd &= ~PCI_COMMAND_SERR; - eeh_ops->write_config(pdn, PCI_COMMAND, 4, cmd); + eeh_ops->write_config(edev, PCI_COMMAND, 4, cmd); } /** diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index a41e67f674e6..c9f2f454d053 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -838,32 +838,32 @@ static int __pnv_eeh_bridge_reset(struct pci_dev *dev, int option) case EEH_RESET_HOT: /* Don't report linkDown event */ if (aer) { - eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK, + eeh_ops->read_config(edev, aer + PCI_ERR_UNCOR_MASK, 4, &ctrl); ctrl |= PCI_ERR_UNC_SURPDN; - eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK, + eeh_ops->write_config(edev, aer + PCI_ERR_UNCOR_MASK, 4, ctrl); } - eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl); + eeh_ops->read_config(edev, PCI_BRIDGE_CONTROL, 2, &ctrl); ctrl |= PCI_BRIDGE_CTL_BUS_RESET; - eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl); + eeh_ops->write_config(edev, PCI_BRIDGE_CONTROL, 2, ctrl); msleep(EEH_PE_RST_HOLD_TIME); break; case EEH_RESET_DEACTIVATE: - eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl); + eeh_ops->read_config(edev, PCI_BRIDGE_CONTROL, 2, &ctrl); ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; - eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl); + eeh_ops->write_config(edev, PCI_BRIDGE_CONTROL, 2, ctrl); msleep(EEH_PE_RST_SETTLE_TIME); /* Continue 
reporting linkDown event */ if (aer) { - eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK, + eeh_ops->read_config(edev, aer + PCI_ERR_UNCOR_MASK, 4, &ctrl); ctrl &= ~PCI_ERR_UNC_SURPDN; - eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK, + eeh_ops->write_config(edev, aer + PCI_ERR_UNCOR_MASK, 4, ctrl); } @@ -932,11 +932,12 @@ void pnv_pci_reset_secondary_bus(struct pci_dev *dev) static void pnv_eeh_wait_for_pending(struct pci_dn *pdn, const char *type, int pos, u16 mask) { + struct eeh_dev *edev = pdn->edev; int i, status = 0; /* Wait for Transaction Pending bit to be cleared */ for (i = 0; i < 4; i++) { - eeh_ops->read_config(pdn, pos, 2, &status); + eeh_ops->read_config(edev, pos, 2, &status); if (!(status & mask)) return; @@ -957,7 +958,7 @@ static int pnv_eeh_do_flr(struct pci_dn *pdn, int option) if (WARN_ON(!edev->pcie_cap)) return -ENOTTY; - eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCAP, 4, ®); + eeh_ops->read_config(edev, edev->pcie_cap + PCI_EXP_DEVCAP, 4, ®); if (!(reg & PCI_EXP_DEVCAP_FLR)) return -ENOTTY; @@ -967,18 +968,18 @@ static int pnv_eeh_do_flr(struct pci_dn *pdn, int option) pnv_eeh_wait_for_pending(pdn, "", edev->pcie_cap + PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND); - eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL, + eeh_ops->read_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL, 4, ®); reg |= PCI_EXP_DEVCTL_BCR_FLR; - eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL, + eeh_ops->write_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL, 4, reg); msleep(EEH_PE_RST_HOLD_TIME); break; case EEH_RESET_DEACTIVATE: - eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL, + eeh_ops->read_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL, 4, ®); reg &= ~PCI_EXP_DEVCTL_BCR_FLR; - eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL, + eeh_ops->write_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL, 4, reg); msleep(EEH_PE_RST_SETTLE_TIME); break; @@ -995,7 +996,7 @@ static int pnv_eeh_do_af_flr(struct pci_dn *pdn, int option) if (WARN_ON(!edev->af_cap)) return -ENOTTY; - eeh_ops->read_config(pdn, edev->af_cap + PCI_AF_CAP, 1, &cap); + eeh_ops->read_config(edev, edev->af_cap + PCI_AF_CAP, 1, &cap); if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR)) return -ENOTTY; @@ -1010,12 +1011,12 @@ static int pnv_eeh_do_af_flr(struct pci_dn *pdn, int option) pnv_eeh_wait_for_pending(pdn, "AF", edev->af_cap + PCI_AF_CTRL, PCI_AF_STATUS_TP << 8); - eeh_ops->write_config(pdn, edev->af_cap + PCI_AF_CTRL, + eeh_ops->write_config(edev, edev->af_cap + PCI_AF_CTRL, 1, PCI_AF_CTRL_FLR); msleep(EEH_PE_RST_HOLD_TIME); break; case EEH_RESET_DEACTIVATE: - eeh_ops->write_config(pdn, edev->af_cap + PCI_AF_CTRL, 1, 0); + eeh_ops->write_config(edev, edev->af_cap + PCI_AF_CTRL, 1, 0); msleep(EEH_PE_RST_SETTLE_TIME); break; } @@ -1249,9 +1250,11 @@ static inline bool pnv_eeh_cfg_blocked(struct pci_dn *pdn) return false; } -static int pnv_eeh_read_config(struct pci_dn *pdn, +static int pnv_eeh_read_config(struct eeh_dev *edev, int where, int size, u32 *val) { + struct pci_dn *pdn = eeh_dev_to_pdn(edev); + if (!pdn) return PCIBIOS_DEVICE_NOT_FOUND; @@ -1263,9 +1266,11 @@ static int pnv_eeh_read_config(struct pci_dn *pdn, return pnv_pci_cfg_read(pdn, where, size, val); } -static int pnv_eeh_write_config(struct pci_dn *pdn, +static int pnv_eeh_write_config(struct eeh_dev *edev, int where, int size, u32 val) { + struct pci_dn *pdn = eeh_dev_to_pdn(edev); + if (!pdn) return PCIBIOS_DEVICE_NOT_FOUND; diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c 
b/arch/powerpc/platforms/pseries/eeh_pseries.c index 57de8acad5f5..53e5381b3d53 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -798,29 +798,33 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe) /** * pseries_eeh_read_config - Read PCI config space - * @pdn: PCI device node - * @where: PCI address + * @edev: EEH device handle + * @where: PCI config space offset * @size: size to read * @val: return value * * Read config space from the speicifed device */ -static int pseries_eeh_read_config(struct pci_dn *pdn, int where, int size, u32 *val) +static int pseries_eeh_read_config(struct eeh_dev *edev, int where, int size, u32 *val) { + struct pci_dn *pdn = eeh_dev_to_pdn(edev); + return rtas_read_config(pdn, where, size, val); } /** * pseries_eeh_write_config - Write PCI config space - * @pdn: PCI device node - * @where: PCI address + * @edev: EEH device handle + * @where: PCI config space offset * @size: size to write * @val: value to be written * * Write config space to the specified device */ -static int pseries_eeh_write_config(struct pci_dn *pdn, int where, int size, u32 val) +static int pseries_eeh_write_config(struct eeh_dev *edev, int where, int size, u32 val) { + struct pci_dn *pdn = eeh_dev_to_pdn(edev); + return rtas_write_config(pdn, where, size, val); } -- cgit v1.2.3 From 1a303d8844d082ef58ff5fc3005b99621a3263ba Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Sat, 25 Jul 2020 18:12:27 +1000 Subject: powerpc/eeh: Remove spurious use of pci_dn in eeh_dump_dev_log Retrieve the domain, bus, device, and function numbers from the edev. Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200725081231.39076-10-oohall@gmail.com --- arch/powerpc/kernel/eeh.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 1a12c8bdf61e..f203ffc5c57d 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -167,23 +167,17 @@ void eeh_show_enabled(void) */ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) { - struct pci_dn *pdn = eeh_dev_to_pdn(edev); u32 cfg; int cap, i; int n = 0, l = 0; char buffer[128]; - if (!pdn) { - pr_warn("EEH: Note: No error log for absent device.\n"); - return 0; - } - n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n", - pdn->phb->global_number, pdn->busno, - PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); + edev->pe->phb->global_number, edev->bdfn >> 8, + PCI_SLOT(edev->bdfn), PCI_FUNC(edev->bdfn)); pr_warn("EEH: of node=%04x:%02x:%02x.%01x\n", - pdn->phb->global_number, pdn->busno, - PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); + edev->pe->phb->global_number, edev->bdfn >> 8, + PCI_SLOT(edev->bdfn), PCI_FUNC(edev->bdfn)); eeh_ops->read_config(edev, PCI_VENDOR_ID, 4, &cfg); n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg); -- cgit v1.2.3 From 768a42845b9ecdb28ba1991e17088b7eeb23a3eb Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Sat, 25 Jul 2020 18:12:28 +1000 Subject: powerpc/eeh: Remove class code field from edev The edev->class_code field is never referenced anywhere except for the platform specific probe functions. The same information is available in the pci_dev for PowerNV and in the pci_dn on pseries so we can remove the field. 
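A condensed sketch of the substitution, assuming a valid pci_dev as in the PowerNV probe path below; the test itself is unchanged, only the source of the class value moves:

    /* before: cached copy on the eeh_dev */
    if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI)
            edev->mode |= EEH_DEV_BRIDGE;

    /* after (PowerNV): read it straight from the pci_dev */
    if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI)
            edev->mode |= EEH_DEV_BRIDGE;

On pseries the equivalent value is still taken from pdn->class_code, as the eeh_pseries.c hunk shows.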
Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200725081231.39076-11-oohall@gmail.com --- arch/powerpc/include/asm/eeh.h | 1 - arch/powerpc/platforms/powernv/eeh-powernv.c | 5 ++--- arch/powerpc/platforms/pseries/eeh_pseries.c | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index f8ef27f75c37..79de8624809a 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -133,7 +133,6 @@ static inline bool eeh_pe_passed(struct eeh_pe *pe) struct eeh_dev { int mode; /* EEH mode */ - int class_code; /* Class code of the device */ int bdfn; /* bdfn of device (for cfg ops) */ struct pci_controller *controller; int pe_config_addr; /* PE config address */ diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index c9f2f454d053..7cbb03a97a61 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -372,19 +372,18 @@ static struct eeh_dev *pnv_eeh_probe(struct pci_dev *pdev) } /* Skip for PCI-ISA bridge */ - if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA) + if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) return NULL; eeh_edev_dbg(edev, "Probing device\n"); /* Initialize eeh device */ - edev->class_code = pdn->class_code; edev->mode &= 0xFFFFFF00; edev->pcix_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_PCIX); edev->pcie_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_EXP); edev->af_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_AF); edev->aer_cap = pnv_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR); - if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) { + if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { edev->mode |= EEH_DEV_BRIDGE; if (edev->pcie_cap) { pnv_pci_cfg_read(pdn, edev->pcie_cap + PCI_EXP_FLAGS, diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 53e5381b3d53..902b929ef307 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -440,12 +440,11 @@ void pseries_eeh_init_edev(struct pci_dn *pdn) * correctly reflects that current device is root port * or PCIe switch downstream port. */ - edev->class_code = pdn->class_code; edev->pcix_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_PCIX); edev->pcie_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_EXP); edev->aer_cap = pseries_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR); edev->mode &= 0xFFFFFF00; - if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) { + if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) { edev->mode |= EEH_DEV_BRIDGE; if (edev->pcie_cap) { rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS, -- cgit v1.2.3 From d923ab7a96fcc2b46aac9b2fc38ffdca72436fd1 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Sat, 25 Jul 2020 18:12:29 +1000 Subject: powerpc/eeh: Rename eeh_{add_to|remove_from}_parent_pe() The naming of eeh_{add_to|remove_from}_parent_pe() doesn't really reflect what they actually do. If the PE referred to by edev->pe_config_addr already exists under that PHB then the edev is added to that PE. However, if the PE doesn't exist then a new one is created for the edev. The bulk of the implementation of eeh_add_to_parent_pe() covers that second case. Similarly, most of eeh_remove_from_parent_pe() is determining when it's safe to delete a PE.
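A usage sketch of the renamed interfaces (error handling trimmed, based on the call sites updated below):

    /* probe path: attach the edev to its PE, creating the PE if needed */
    ret = eeh_pe_tree_insert(edev);
    if (ret)
            eeh_edev_warn(edev, "Failed to add device to PE (code %d)\n", ret);

    /* removal path: detach the edev and drop the PE once it is empty */
    eeh_pe_tree_remove(edev);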
Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200725081231.39076-12-oohall@gmail.com --- arch/powerpc/include/asm/eeh.h | 4 ++-- arch/powerpc/kernel/eeh.c | 4 ++-- arch/powerpc/kernel/eeh_driver.c | 2 +- arch/powerpc/kernel/eeh_pe.c | 8 ++++---- arch/powerpc/kernel/pci_dn.c | 2 +- arch/powerpc/platforms/powernv/eeh-powernv.c | 2 +- arch/powerpc/platforms/pseries/eeh_pseries.c | 8 ++++---- 7 files changed, 15 insertions(+), 15 deletions(-) diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 79de8624809a..ee5f354bc8b9 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -283,8 +283,8 @@ struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb); struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, struct eeh_pe *root); struct eeh_pe *eeh_pe_get(struct pci_controller *phb, int pe_no, int config_addr); -int eeh_add_to_parent_pe(struct eeh_dev *edev); -int eeh_rmv_from_parent_pe(struct eeh_dev *edev); +int eeh_pe_tree_insert(struct eeh_dev *edev); +int eeh_pe_tree_remove(struct eeh_dev *edev); void eeh_pe_update_time_stamp(struct eeh_pe *pe); void *eeh_pe_traverse(struct eeh_pe *root, eeh_pe_traverse_func fn, void *flag); diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index f203ffc5c57d..94682382fc8c 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -1107,7 +1107,7 @@ void eeh_probe_device(struct pci_dev *dev) * FIXME: HEY MA, LOOK AT ME, NO LOCKING! */ if (edev->pdev && edev->pdev != dev) { - eeh_rmv_from_parent_pe(edev); + eeh_pe_tree_remove(edev); eeh_addr_cache_rmv_dev(edev->pdev); eeh_sysfs_remove_device(edev->pdev); @@ -1186,7 +1186,7 @@ void eeh_remove_device(struct pci_dev *dev) edev->in_error = false; dev->dev.archdata.edev = NULL; if (!(edev->pe->state & EEH_PE_KEEP)) - eeh_rmv_from_parent_pe(edev); + eeh_pe_tree_remove(edev); else edev->mode |= EEH_DEV_DISCONNECTED; } diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index b84d3cb2532e..4197e4559f65 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -542,7 +542,7 @@ static void *eeh_pe_detach_dev(struct eeh_pe *pe, void *userdata) continue; edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED); - eeh_rmv_from_parent_pe(edev); + eeh_pe_tree_remove(edev); } return NULL; diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index f20fb0ee6aec..97bf09db2ecd 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -356,7 +356,7 @@ static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev) } /** - * eeh_add_to_parent_pe - Add EEH device to parent PE + * eeh_pe_tree_insert - Add EEH device to parent PE * @edev: EEH device * * Add EEH device to the parent PE. If the parent PE already @@ -364,7 +364,7 @@ static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev) * we have to create new PE to hold the EEH device and the new * PE will be linked to its parent PE as well. */ -int eeh_add_to_parent_pe(struct eeh_dev *edev) +int eeh_pe_tree_insert(struct eeh_dev *edev) { struct eeh_pe *pe, *parent; struct pci_dn *pdn = eeh_dev_to_pdn(edev); @@ -459,7 +459,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) } /** - * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE + * eeh_pe_tree_remove - Remove one EEH device from the associated PE * @edev: EEH device * * The PE hierarchy tree might be changed when doing PCI hotplug. 
@@ -467,7 +467,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) * during EEH recovery. So we have to call the function remove the * corresponding PE accordingly if necessary. */ -int eeh_rmv_from_parent_pe(struct eeh_dev *edev) +int eeh_pe_tree_remove(struct eeh_dev *edev) { struct eeh_pe *pe, *parent, *child; bool keep, recover; diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index bf11ac8427ac..e99b7c547d7e 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c @@ -263,7 +263,7 @@ void remove_sriov_vf_pdns(struct pci_dev *pdev) * have a configured PE. */ if (edev->pe) - eeh_rmv_from_parent_pe(edev); + eeh_pe_tree_remove(edev); pdn->edev = NULL; kfree(edev); diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 7cbb03a97a61..8c9fca773692 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -399,7 +399,7 @@ static struct eeh_dev *pnv_eeh_probe(struct pci_dev *pdev) edev->pe_config_addr = phb->ioda.pe_rmap[config_addr]; /* Create PE */ - ret = eeh_add_to_parent_pe(edev); + ret = eeh_pe_tree_insert(edev); if (ret) { eeh_edev_warn(edev, "Failed to add device to PE (code %d)\n", ret); return NULL; diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 902b929ef307..69650d8dc705 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -72,8 +72,8 @@ void pseries_pcibios_bus_add_device(struct pci_dev *pdev) struct eeh_dev *edev = pdn_to_eeh_dev(pdn); edev->pe_config_addr = (pdn->busno << 16) | (pdn->devfn << 8); - eeh_rmv_from_parent_pe(edev); /* Remove as it is adding to bus pe */ - eeh_add_to_parent_pe(edev); /* Add as VF PE type */ + eeh_pe_tree_remove(edev); /* Remove as it is adding to bus pe */ + eeh_pe_tree_insert(edev); /* Add as VF PE type */ } #endif eeh_probe_device(pdev); @@ -482,14 +482,14 @@ void pseries_eeh_init_edev(struct pci_dn *pdn) if (enable) { eeh_add_flag(EEH_ENABLED); - eeh_add_to_parent_pe(edev); + eeh_pe_tree_insert(edev); } else if (pdn->parent && pdn_to_eeh_dev(pdn->parent) && (pdn_to_eeh_dev(pdn->parent))->pe) { /* This device doesn't support EEH, but it may have an * EEH parent, in which case we mark it as supported. */ edev->pe_config_addr = pdn_to_eeh_dev(pdn->parent)->pe_config_addr; - eeh_add_to_parent_pe(edev); + eeh_pe_tree_insert(edev); } eeh_edev_dbg(edev, "EEH is %s on device (code %d)\n", (enable ? "enabled" : "unsupported"), ret); -- cgit v1.2.3 From 31595ae5aece519be5faa2e2013278ce45894d26 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Sat, 25 Jul 2020 18:12:30 +1000 Subject: powerpc/eeh: Drop pdn use in eeh_pe_tree_insert() This is mostly just to make the subsequent diffs less noisy. No functional changes. One thing that needs calling out is the removal of the "config_addr" variable and replacing it with edev->bdfn. The contents of edev->bdfn are the same, however it's worth pointing out that what RTAS calls a "config_addr" isn't the same as the bdfn. The config_addr is supposed to be: with each field being an 8 bit number. Various parts of the EEH code use BDFN and "config_addr" as interchangeable quantities even though they aren't really. 
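As a rough illustration of the distinction being drawn here: a bdfn and an RTAS-style config_addr pack their fields differently. The layouts below are inferred from the existing pseries code (for example the (pdn->busno << 16) | (pdn->devfn << 8) construction seen earlier in this series) rather than defined by this patch, and the helper names are hypothetical:

    /* sketch: assumed layouts, each field 8 bits wide */
    static inline u16 sample_bdfn(u8 busno, u8 devfn)
    {
            return (busno << 8) | devfn;                /* what edev->bdfn holds */
    }

    static inline u32 sample_rtas_config_addr(u8 busno, u8 devfn, u8 reg)
    {
            return (busno << 16) | (devfn << 8) | reg;  /* what RTAS expects */
    }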
Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200725081231.39076-13-oohall@gmail.com --- arch/powerpc/kernel/eeh_pe.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 97bf09db2ecd..898205829a8f 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -366,9 +366,8 @@ static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev) */ int eeh_pe_tree_insert(struct eeh_dev *edev) { + struct pci_controller *hose = edev->controller; struct eeh_pe *pe, *parent; - struct pci_dn *pdn = eeh_dev_to_pdn(edev); - int config_addr = (pdn->busno << 8) | (pdn->devfn); /* Check if the PE number is valid */ if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) { @@ -382,7 +381,7 @@ int eeh_pe_tree_insert(struct eeh_dev *edev) * PE should be composed of PCI bus and its subordinate * components. */ - pe = eeh_pe_get(pdn->phb, edev->pe_config_addr, config_addr); + pe = eeh_pe_get(hose, edev->pe_config_addr, edev->bdfn); if (pe) { if (pe->type & EEH_PE_INVALID) { list_add_tail(&edev->entry, &pe->edevs); @@ -416,15 +415,15 @@ int eeh_pe_tree_insert(struct eeh_dev *edev) /* Create a new EEH PE */ if (edev->physfn) - pe = eeh_pe_alloc(pdn->phb, EEH_PE_VF); + pe = eeh_pe_alloc(hose, EEH_PE_VF); else - pe = eeh_pe_alloc(pdn->phb, EEH_PE_DEVICE); + pe = eeh_pe_alloc(hose, EEH_PE_DEVICE); if (!pe) { pr_err("%s: out of memory!\n", __func__); return -ENOMEM; } pe->addr = edev->pe_config_addr; - pe->config_addr = config_addr; + pe->config_addr = edev->bdfn; /* * Put the new EEH PE into hierarchy tree. If the parent @@ -434,10 +433,10 @@ int eeh_pe_tree_insert(struct eeh_dev *edev) */ parent = eeh_pe_get_parent(edev); if (!parent) { - parent = eeh_phb_pe_get(pdn->phb); + parent = eeh_phb_pe_get(hose); if (!parent) { pr_err("%s: No PHB PE is found (PHB Domain=%d)\n", - __func__, pdn->phb->global_number); + __func__, hose->global_number); edev->pe = NULL; kfree(pe); return -EEXIST; -- cgit v1.2.3 From a131bfc69bc868083a6c7f9b5dad1331902a3534 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Sat, 25 Jul 2020 18:12:31 +1000 Subject: powerpc/eeh: Move PE tree setup into the platform The EEH core has a concept of a "PE tree" to support PowerNV. The PE tree follows the PCI bus structures because a reset asserted on an upstream bridge will be propagated to the downstream bridges. On pseries there's a 1-1 correspondence between what the guest sees are a PHB and a PE so the "tree" is really just a single node. Current the EEH core is reponsible for setting up this PE tree which it does by traversing the pci_dn tree. The structure of the pci_dn tree matches the bus tree on PowerNV which leads to the PE tree being "correct" this setup method doesn't make a whole lot of sense and it's actively confusing for the pseries case where it doesn't really do anything. We want to remove the dependence on pci_dn anyway so this patch move choosing where to insert a new PE into the platform code rather than being part of the generic EEH code. For PowerNV this simplifies the tree building logic and removes the use of pci_dn. For pseries we keep the existing logic. I'm not really convinced it does anything due to the 1-1 PE-to-PHB correspondence so every device under that PHB should be in the same PE, but I'd rather not remove it entirely until we've had a chance to look at it more deeply. 
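A condensed sketch of the resulting call pattern on PowerNV (simplified from the probe-path hunks below; error handling trimmed): the platform chooses where a new PE would hang off the tree, and the core either reuses an existing PE or creates one under that parent, with NULL meaning "directly under the PHB PE":

    /* platform picks the upstream PE for this device ... */
    upstream_pe = pnv_eeh_get_upstream_pe(pdev);

    /* ... and the core does the insert/create under it */
    ret = eeh_pe_tree_insert(edev, upstream_pe);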
Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200725081231.39076-14-oohall@gmail.com --- arch/powerpc/include/asm/eeh.h | 2 +- arch/powerpc/kernel/eeh_pe.c | 71 ++++++++-------------------- arch/powerpc/platforms/powernv/eeh-powernv.c | 27 ++++++++++- arch/powerpc/platforms/pseries/eeh_pseries.c | 67 ++++++++++++++++++++++---- 4 files changed, 103 insertions(+), 64 deletions(-) diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index ee5f354bc8b9..d5f369bcd130 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -283,7 +283,7 @@ struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb); struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, struct eeh_pe *root); struct eeh_pe *eeh_pe_get(struct pci_controller *phb, int pe_no, int config_addr); -int eeh_pe_tree_insert(struct eeh_dev *edev); +int eeh_pe_tree_insert(struct eeh_dev *edev, struct eeh_pe *new_pe_parent); int eeh_pe_tree_remove(struct eeh_dev *edev); void eeh_pe_update_time_stamp(struct eeh_pe *pe); void *eeh_pe_traverse(struct eeh_pe *root, diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 898205829a8f..d2aaaa73fdd5 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -318,53 +318,20 @@ struct eeh_pe *eeh_pe_get(struct pci_controller *phb, return pe; } -/** - * eeh_pe_get_parent - Retrieve the parent PE - * @edev: EEH device - * - * The whole PEs existing in the system are organized as hierarchy - * tree. The function is used to retrieve the parent PE according - * to the parent EEH device. - */ -static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev) -{ - struct eeh_dev *parent; - struct pci_dn *pdn = eeh_dev_to_pdn(edev); - - /* - * It might have the case for the indirect parent - * EEH device already having associated PE, but - * the direct parent EEH device doesn't have yet. - */ - if (edev->physfn) - pdn = pci_get_pdn(edev->physfn); - else - pdn = pdn ? pdn->parent : NULL; - while (pdn) { - /* We're poking out of PCI territory */ - parent = pdn_to_eeh_dev(pdn); - if (!parent) - return NULL; - - if (parent->pe) - return parent->pe; - - pdn = pdn->parent; - } - - return NULL; -} - /** * eeh_pe_tree_insert - Add EEH device to parent PE * @edev: EEH device + * @new_pe_parent: PE to create additional PEs under * - * Add EEH device to the parent PE. If the parent PE already - * exists, the PE type will be changed to EEH_PE_BUS. Otherwise, - * we have to create new PE to hold the EEH device and the new - * PE will be linked to its parent PE as well. + * Add EEH device to the PE in edev->pe_config_addr. If a PE already + * exists with that address then @edev is added to that PE. Otherwise + * a new PE is created and inserted into the PE tree as a child of + * @new_pe_parent. + * + * If @new_pe_parent is NULL then the new PE will be inserted under + * directly under the the PHB. */ -int eeh_pe_tree_insert(struct eeh_dev *edev) +int eeh_pe_tree_insert(struct eeh_dev *edev, struct eeh_pe *new_pe_parent) { struct pci_controller *hose = edev->controller; struct eeh_pe *pe, *parent; @@ -398,8 +365,7 @@ int eeh_pe_tree_insert(struct eeh_dev *edev) parent = parent->parent; } - eeh_edev_dbg(edev, - "Added to device PE (parent: PE#%x)\n", + eeh_edev_dbg(edev, "Added to existing PE (parent: PE#%x)\n", pe->parent->addr); } else { /* Mark the PE as type of PCI bus */ @@ -431,10 +397,9 @@ int eeh_pe_tree_insert(struct eeh_dev *edev) * to PHB directly. 
Otherwise, we have to associate the * PE with its parent. */ - parent = eeh_pe_get_parent(edev); - if (!parent) { - parent = eeh_phb_pe_get(hose); - if (!parent) { + if (!new_pe_parent) { + new_pe_parent = eeh_phb_pe_get(hose); + if (!new_pe_parent) { pr_err("%s: No PHB PE is found (PHB Domain=%d)\n", __func__, hose->global_number); edev->pe = NULL; @@ -442,17 +407,19 @@ int eeh_pe_tree_insert(struct eeh_dev *edev) return -EEXIST; } } - pe->parent = parent; + + /* link new PE into the tree */ + pe->parent = new_pe_parent; + list_add_tail(&pe->child, &new_pe_parent->child_list); /* * Put the newly created PE into the child list and * link the EEH device accordingly. */ - list_add_tail(&pe->child, &parent->child_list); list_add_tail(&edev->entry, &pe->edevs); edev->pe = pe; - eeh_edev_dbg(edev, "Added to device PE (parent: PE#%x)\n", - pe->parent->addr); + eeh_edev_dbg(edev, "Added to new (parent: PE#%x)\n", + new_pe_parent->addr); return 0; } diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 8c9fca773692..9af8c3b98853 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -338,6 +338,28 @@ static int pnv_eeh_find_ecap(struct pci_dn *pdn, int cap) return 0; } +static struct eeh_pe *pnv_eeh_get_upstream_pe(struct pci_dev *pdev) +{ + struct pci_controller *hose = pdev->bus->sysdata; + struct pnv_phb *phb = hose->private_data; + struct pci_dev *parent = pdev->bus->self; + +#ifdef CONFIG_PCI_IOV + /* for VFs we use the PF's PE as the upstream PE */ + if (pdev->is_virtfn) + parent = pdev->physfn; +#endif + + /* otherwise use the PE of our parent bridge */ + if (parent) { + struct pnv_ioda_pe *ioda_pe = pnv_ioda_get_pe(parent); + + return eeh_pe_get(phb->hose, ioda_pe->pe_number, 0); + } + + return NULL; +} + /** * pnv_eeh_probe - Do probe on PCI device * @pdev: pci_dev to probe @@ -350,6 +372,7 @@ static struct eeh_dev *pnv_eeh_probe(struct pci_dev *pdev) struct pci_controller *hose = pdn->phb; struct pnv_phb *phb = hose->private_data; struct eeh_dev *edev = pdn_to_eeh_dev(pdn); + struct eeh_pe *upstream_pe; uint32_t pcie_flags; int ret; int config_addr = (pdn->busno << 8) | (pdn->devfn); @@ -398,8 +421,10 @@ static struct eeh_dev *pnv_eeh_probe(struct pci_dev *pdev) edev->pe_config_addr = phb->ioda.pe_rmap[config_addr]; + upstream_pe = pnv_eeh_get_upstream_pe(pdev); + /* Create PE */ - ret = eeh_pe_tree_insert(edev); + ret = eeh_pe_tree_insert(edev, upstream_pe); if (ret) { eeh_edev_warn(edev, "Failed to add device to PE (code %d)\n", ret); return NULL; diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 69650d8dc705..cb2d9a970b7b 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -69,11 +69,16 @@ void pseries_pcibios_bus_add_device(struct pci_dev *pdev) pseries_eeh_init_edev(pdn); #ifdef CONFIG_PCI_IOV if (pdev->is_virtfn) { + /* + * FIXME: This really should be handled by choosing the right + * parent PE in in pseries_eeh_init_edev(). 
+ */ + struct eeh_pe *physfn_pe = pci_dev_to_eeh_dev(pdev->physfn)->pe; struct eeh_dev *edev = pdn_to_eeh_dev(pdn); edev->pe_config_addr = (pdn->busno << 16) | (pdn->devfn << 8); eeh_pe_tree_remove(edev); /* Remove as it is adding to bus pe */ - eeh_pe_tree_insert(edev); /* Add as VF PE type */ + eeh_pe_tree_insert(edev, physfn_pe); /* Add as VF PE type */ } #endif eeh_probe_device(pdev); @@ -385,6 +390,43 @@ static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap) return 0; } +/** + * pseries_eeh_pe_get_parent - Retrieve the parent PE + * @edev: EEH device + * + * The whole PEs existing in the system are organized as hierarchy + * tree. The function is used to retrieve the parent PE according + * to the parent EEH device. + */ +static struct eeh_pe *pseries_eeh_pe_get_parent(struct eeh_dev *edev) +{ + struct eeh_dev *parent; + struct pci_dn *pdn = eeh_dev_to_pdn(edev); + + /* + * It might have the case for the indirect parent + * EEH device already having associated PE, but + * the direct parent EEH device doesn't have yet. + */ + if (edev->physfn) + pdn = pci_get_pdn(edev->physfn); + else + pdn = pdn ? pdn->parent : NULL; + while (pdn) { + /* We're poking out of PCI territory */ + parent = pdn_to_eeh_dev(pdn); + if (!parent) + return NULL; + + if (parent->pe) + return parent->pe; + + pdn = pdn->parent; + } + + return NULL; +} + /** * pseries_eeh_init_edev - initialise the eeh_dev and eeh_pe for a pci_dn * @@ -468,6 +510,8 @@ void pseries_eeh_init_edev(struct pci_dn *pdn) if (ret) { eeh_edev_dbg(edev, "EEH failed to enable on device (code %d)\n", ret); } else { + struct eeh_pe *parent; + /* Retrieve PE address */ edev->pe_config_addr = pseries_eeh_get_pe_addr(pdn); pe.addr = edev->pe_config_addr; @@ -480,16 +524,19 @@ void pseries_eeh_init_edev(struct pci_dn *pdn) if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT) enable = 1; - if (enable) { + /* + * This device doesn't support EEH, but it may have an + * EEH parent. In this case any error on the device will + * freeze the PE of it's upstream bridge, so added it to + * the upstream PE. + */ + parent = pseries_eeh_pe_get_parent(edev); + if (parent && !enable) + edev->pe_config_addr = parent->addr; + + if (enable || parent) { eeh_add_flag(EEH_ENABLED); - eeh_pe_tree_insert(edev); - } else if (pdn->parent && pdn_to_eeh_dev(pdn->parent) && - (pdn_to_eeh_dev(pdn->parent))->pe) { - /* This device doesn't support EEH, but it may have an - * EEH parent, in which case we mark it as supported. - */ - edev->pe_config_addr = pdn_to_eeh_dev(pdn->parent)->pe_config_addr; - eeh_pe_tree_insert(edev); + eeh_pe_tree_insert(edev, parent); } eeh_edev_dbg(edev, "EEH is %s on device (code %d)\n", (enable ? "enabled" : "unsupported"), ret); -- cgit v1.2.3 From 5609ffddd19dd52019d78b197e86b0331aeef8ae Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 22 Jul 2020 16:57:00 +1000 Subject: powerpc/powernv/pci: Add pci_bus_to_pnvhb() helper Add a helper to go from a pci_bus structure to the pnv_phb that hosts that bus. There's a lot of instances of the following pattern: struct pci_controller *hose = pci_bus_to_host(pdev->bus); struct pnv_phb *phb = hose->private_data; Without any other uses of the pci_controller inside the function. This is hard to read since it requires you to memorise the contents of the private data fields and kind of error prone since it involves blindly assigning a void pointer. Add a helper to make it more concise and explicit. 
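A before/after sketch of the pattern being replaced, as used throughout the hunks below:

    /* before */
    struct pci_controller *hose = pci_bus_to_host(pdev->bus);
    struct pnv_phb *phb = hose->private_data;

    /* after */
    struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);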
Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200722065715.1432738-1-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-ioda.c | 88 +++++++++---------------------- arch/powerpc/platforms/powernv/pci.c | 14 ++--- arch/powerpc/platforms/powernv/pci.h | 10 ++++ 3 files changed, 38 insertions(+), 74 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 31c3e6d58c41..687919db0347 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -252,8 +252,7 @@ fail: static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev, unsigned long *pe_bitmap) { - struct pci_controller *hose = pci_bus_to_host(pdev->bus); - struct pnv_phb *phb = hose->private_data; + struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); struct resource *r; resource_size_t base, sgsz, start, end; int segno, i; @@ -351,8 +350,7 @@ static void pnv_ioda_reserve_m64_pe(struct pci_bus *bus, static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all) { - struct pci_controller *hose = pci_bus_to_host(bus); - struct pnv_phb *phb = hose->private_data; + struct pnv_phb *phb = pci_bus_to_pnvhb(bus); struct pnv_ioda_pe *master_pe, *pe; unsigned long size, *pe_alloc; int i; @@ -673,8 +671,7 @@ struct pnv_ioda_pe *pnv_pci_bdfn_to_pe(struct pnv_phb *phb, u16 bdfn) struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev) { - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; + struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); struct pci_dn *pdn = pci_get_pdn(dev); if (!pdn) @@ -1069,8 +1066,7 @@ static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset) static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) { - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; + struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); struct pci_dn *pdn = pci_get_pdn(dev); struct pnv_ioda_pe *pe; @@ -1129,8 +1125,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) */ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all) { - struct pci_controller *hose = pci_bus_to_host(bus); - struct pnv_phb *phb = hose->private_data; + struct pnv_phb *phb = pci_bus_to_pnvhb(bus); struct pnv_ioda_pe *pe = NULL; unsigned int pe_num; @@ -1196,8 +1191,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev) struct pnv_ioda_pe *pe; struct pci_dev *gpu_pdev; struct pci_dn *npu_pdn; - struct pci_controller *hose = pci_bus_to_host(npu_pdev->bus); - struct pnv_phb *phb = hose->private_data; + struct pnv_phb *phb = pci_bus_to_pnvhb(npu_pdev->bus); /* * Intentionally leak a reference on the npu device (for @@ -1300,16 +1294,12 @@ static void pnv_pci_ioda_setup_nvlink(void) #ifdef CONFIG_PCI_IOV static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs) { - struct pci_bus *bus; - struct pci_controller *hose; struct pnv_phb *phb; struct pci_dn *pdn; int i, j; int m64_bars; - bus = pdev->bus; - hose = pci_bus_to_host(bus); - phb = hose->private_data; + phb = pci_bus_to_pnvhb(pdev->bus); pdn = pci_get_pdn(pdev); if (pdn->m64_single_mode) @@ -1333,8 +1323,6 @@ static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs) static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) { - struct pci_bus *bus; - struct pci_controller *hose; struct pnv_phb *phb; struct pci_dn *pdn; unsigned int win; @@ 
-1346,9 +1334,7 @@ static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) int pe_num; int m64_bars; - bus = pdev->bus; - hose = pci_bus_to_host(bus); - phb = hose->private_data; + phb = pci_bus_to_pnvhb(pdev->bus); pdn = pci_get_pdn(pdev); total_vfs = pci_sriov_get_totalvfs(pdev); @@ -1459,15 +1445,11 @@ static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe static void pnv_ioda_release_vf_PE(struct pci_dev *pdev) { - struct pci_bus *bus; - struct pci_controller *hose; struct pnv_phb *phb; struct pnv_ioda_pe *pe, *pe_n; struct pci_dn *pdn; - bus = pdev->bus; - hose = pci_bus_to_host(bus); - phb = hose->private_data; + phb = pci_bus_to_pnvhb(pdev->bus); pdn = pci_get_pdn(pdev); if (!pdev->is_physfn) @@ -1492,16 +1474,12 @@ static void pnv_ioda_release_vf_PE(struct pci_dev *pdev) static void pnv_pci_sriov_disable(struct pci_dev *pdev) { - struct pci_bus *bus; - struct pci_controller *hose; struct pnv_phb *phb; struct pnv_ioda_pe *pe; struct pci_dn *pdn; u16 num_vfs, i; - bus = pdev->bus; - hose = pci_bus_to_host(bus); - phb = hose->private_data; + phb = pci_bus_to_pnvhb(pdev->bus); pdn = pci_get_pdn(pdev); num_vfs = pdn->num_vfs; @@ -1535,17 +1513,13 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe); static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) { - struct pci_bus *bus; - struct pci_controller *hose; struct pnv_phb *phb; struct pnv_ioda_pe *pe; int pe_num; u16 vf_index; struct pci_dn *pdn; - bus = pdev->bus; - hose = pci_bus_to_host(bus); - phb = hose->private_data; + phb = pci_bus_to_pnvhb(pdev->bus); pdn = pci_get_pdn(pdev); if (!pdev->is_physfn) @@ -1572,7 +1546,7 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) pe->rid = (vf_bus << 8) | vf_devfn; pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n", - hose->global_number, pdev->bus->number, + pci_domain_nr(pdev->bus), pdev->bus->number, PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num); if (pnv_ioda_configure_pe(phb, pe)) { @@ -1602,17 +1576,13 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) { - struct pci_bus *bus; - struct pci_controller *hose; struct pnv_phb *phb; struct pnv_ioda_pe *pe; struct pci_dn *pdn; int ret; u16 i; - bus = pdev->bus; - hose = pci_bus_to_host(bus); - phb = hose->private_data; + phb = pci_bus_to_pnvhb(pdev->bus); pdn = pci_get_pdn(pdev); if (phb->type == PNV_PHB_IODA2) { @@ -1735,8 +1705,7 @@ static int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs) static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev) { - struct pci_controller *hose = pci_bus_to_host(pdev->bus); - struct pnv_phb *phb = hose->private_data; + struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); struct pci_dn *pdn = pci_get_pdn(pdev); struct pnv_ioda_pe *pe; @@ -1847,8 +1816,7 @@ err: static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev, u64 dma_mask) { - struct pci_controller *hose = pci_bus_to_host(pdev->bus); - struct pnv_phb *phb = hose->private_data; + struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); struct pci_dn *pdn = pci_get_pdn(pdev); struct pnv_ioda_pe *pe; @@ -2766,8 +2734,7 @@ static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) #ifdef CONFIG_PCI_IOV static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev) { - struct pci_controller *hose = pci_bus_to_host(pdev->bus); - struct pnv_phb *phb = hose->private_data; + struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); const 
resource_size_t gate = phb->ioda.m64_segsize >> 2; struct resource *res; int i; @@ -3101,10 +3068,9 @@ static void pnv_pci_ioda_fixup(void) static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus, unsigned long type) { - struct pci_dev *bridge; - struct pci_controller *hose = pci_bus_to_host(bus); - struct pnv_phb *phb = hose->private_data; + struct pnv_phb *phb = pci_bus_to_pnvhb(bus); int num_pci_bridges = 0; + struct pci_dev *bridge; bridge = bus->self; while (bridge) { @@ -3190,8 +3156,7 @@ static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus, static void pnv_pci_configure_bus(struct pci_bus *bus) { - struct pci_controller *hose = pci_bus_to_host(bus); - struct pnv_phb *phb = hose->private_data; + struct pnv_phb *phb = pci_bus_to_pnvhb(bus); struct pci_dev *bridge = bus->self; struct pnv_ioda_pe *pe; bool all = (bridge && pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE); @@ -3237,8 +3202,7 @@ static resource_size_t pnv_pci_default_alignment(void) static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev, int resno) { - struct pci_controller *hose = pci_bus_to_host(pdev->bus); - struct pnv_phb *phb = hose->private_data; + struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); struct pci_dn *pdn = pci_get_pdn(pdev); resource_size_t align; @@ -3274,8 +3238,7 @@ static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev, */ static bool pnv_pci_enable_device_hook(struct pci_dev *dev) { - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; + struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); struct pci_dn *pdn; /* The function is probably called while the PEs have @@ -3488,8 +3451,7 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe) static void pnv_pci_release_device(struct pci_dev *pdev) { - struct pci_controller *hose = pci_bus_to_host(pdev->bus); - struct pnv_phb *phb = hose->private_data; + struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); struct pci_dn *pdn = pci_get_pdn(pdev); struct pnv_ioda_pe *pe; @@ -3534,8 +3496,7 @@ static void pnv_pci_ioda_shutdown(struct pci_controller *hose) static void pnv_pci_ioda_dma_bus_setup(struct pci_bus *bus) { - struct pci_controller *hose = bus->sysdata; - struct pnv_phb *phb = hose->private_data; + struct pnv_phb *phb = pci_bus_to_pnvhb(bus); struct pnv_ioda_pe *pe; list_for_each_entry(pe, &phb->ioda.pe_list, list) { @@ -3873,8 +3834,7 @@ void __init pnv_pci_init_npu2_opencapi_phb(struct device_node *np) static void pnv_npu2_opencapi_cfg_size_fixup(struct pci_dev *dev) { - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; + struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); if (!machine_is(powernv)) return; diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 091fe1cf386b..9b9bca169275 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -162,8 +162,7 @@ EXPORT_SYMBOL_GPL(pnv_pci_set_power_state); int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) { - struct pci_controller *hose = pci_bus_to_host(pdev->bus); - struct pnv_phb *phb = hose->private_data; + struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); struct msi_desc *entry; struct msi_msg msg; int hwirq; @@ -211,8 +210,7 @@ int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) void pnv_teardown_msi_irqs(struct pci_dev *pdev) { - struct pci_controller *hose = pci_bus_to_host(pdev->bus); - struct pnv_phb *phb = hose->private_data; + 
struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); struct msi_desc *entry; irq_hw_number_t hwirq; @@ -824,10 +822,9 @@ EXPORT_SYMBOL(pnv_pci_get_phb_node); int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable) { - __be64 val; - struct pci_controller *hose; - struct pnv_phb *phb; + struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); u64 tunnel_bar; + __be64 val; int rc; if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR)) @@ -835,9 +832,6 @@ int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable) if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR)) return -ENXIO; - hose = pci_bus_to_host(dev->bus); - phb = hose->private_data; - mutex_lock(&tunnel_mutex); rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val); if (rc != OPAL_SUCCESS) { diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 51c254f2f3cb..0727dec9a0d1 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -260,4 +260,14 @@ extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl, extern unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb); +static inline struct pnv_phb *pci_bus_to_pnvhb(struct pci_bus *bus) +{ + struct pci_controller *hose = bus->sysdata; + + if (hose) + return hose->private_data; + + return NULL; +} + #endif /* __POWERNV_PCI_H */ -- cgit v1.2.3 From 7a52ffabe867c0d93e47af113e5107340974047a Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 22 Jul 2020 16:57:01 +1000 Subject: powerpc/powernv/pci: Always tear down DMA windows on PE release Currently we have these two functions: pnv_pci_ioda2_release_dma_pe(), and pnv_pci_ioda2_release_pe_dma() The first is used when tearing down VF PEs and the other is used for normal devices. There's very little difference between the two though. The latter (non-VF) will skip a call to pnv_pci_ioda2_unset_window() unless CONFIG_IOMMU_API=y is set. There's no real point in doing this so fold the two together. Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200722065715.1432738-2-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-ioda.c | 30 +++--------------------------- 1 file changed, 3 insertions(+), 27 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 687919db0347..bfb40607aa0e 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1422,26 +1422,7 @@ m64_failed: return -EBUSY; } -static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group, - int num); - -static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe) -{ - struct iommu_table *tbl; - int64_t rc; - - tbl = pe->table_group.tables[0]; - rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0); - if (rc) - pe_warn(pe, "OPAL error %lld release DMA window\n", rc); - - pnv_pci_ioda2_set_bypass(pe, false); - if (pe->table_group.group) { - iommu_group_put(pe->table_group.group); - BUG_ON(pe->table_group.group); - } - iommu_tce_table_put(tbl); -} +static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe); static void pnv_ioda_release_vf_PE(struct pci_dev *pdev) { @@ -1455,11 +1436,12 @@ static void pnv_ioda_release_vf_PE(struct pci_dev *pdev) if (!pdev->is_physfn) return; + /* FIXME: Use pnv_ioda_release_pe()? 
*/ list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) { if (pe->parent_dev != pdev) continue; - pnv_pci_ioda2_release_dma_pe(pdev, pe); + pnv_pci_ioda2_release_pe_dma(pe); /* Remove from list */ mutex_lock(&phb->ioda.pe_list_mutex); @@ -2429,7 +2411,6 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe) return 0; } -#if defined(CONFIG_IOMMU_API) || defined(CONFIG_PCI_IOV) static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group, int num) { @@ -2453,7 +2434,6 @@ static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group, return ret; } -#endif #ifdef CONFIG_IOMMU_API unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift, @@ -3334,18 +3314,14 @@ static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe) { struct iommu_table *tbl = pe->table_group.tables[0]; unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe); -#ifdef CONFIG_IOMMU_API int64_t rc; -#endif if (!weight) return; -#ifdef CONFIG_IOMMU_API rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0); if (rc) pe_warn(pe, "OPAL error %lld release DMA window\n", rc); -#endif pnv_pci_ioda2_set_bypass(pe, false); if (pe->table_group.group) { -- cgit v1.2.3 From 01e12629af4e0e4864ed4d83e07783d7cb5b06be Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 22 Jul 2020 16:57:02 +1000 Subject: powerpc/powernv/pci: Add explicit tracking of the DMA setup state There's an optimisation in the PE setup which skips performing DMA setup for a PE if we only have bridges in a PE. The assumption being that only "real" devices will DMA to system memory, which is probably fair. However, if we start off with only bridge devices in a PE then add a non-bridge device the new device won't be able to use DMA because we never configured it. Fix this (admittedly pretty weird) edge case by tracking whether we've done the DMA setup for the PE or not. If a non-bridge device is added to the PE (via rescan or hotplug, or whatever) we can set up DMA on demand. This also means the only remaining user of the old "DMA Weight" code is the IODA1 DMA setup code that it was originally added for, which is good. 
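The deferred setup boils down to a flag check when a device is added to the PE. A condensed sketch of the pattern, taken from the pnv_pci_ioda_dma_dev_setup() hunk below:

    /* only do the TCE/window setup once a real (non-bridge) device shows up */
    if (!pe->dma_setup_done && !pci_is_bridge(pdev)) {
            switch (phb->type) {
            case PNV_PHB_IODA1:
                    pnv_pci_ioda1_setup_dma_pe(phb, pe);
                    break;
            case PNV_PHB_IODA2:
                    pnv_pci_ioda2_setup_dma_pe(phb, pe);
                    break;
            default:
                    pr_warn("%s: No DMA for PHB#%x (type %d)\n",
                            __func__, phb->hose->global_number, phb->type);
            }
    }

Each setup routine sets pe->dma_setup_done once it has built the tables, so the work happens at most once per PE.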
Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200722065715.1432738-3-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-ioda.c | 48 +++++++++++++++++++------------ arch/powerpc/platforms/powernv/pci.h | 7 +++++ 2 files changed, 36 insertions(+), 19 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index bfb40607aa0e..bb9c1cc60c33 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -141,6 +141,7 @@ static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no) phb->ioda.pe_array[pe_no].phb = phb; phb->ioda.pe_array[pe_no].pe_number = pe_no; + phb->ioda.pe_array[pe_no].dma_setup_done = false; /* * Clear the PE frozen state as it might be put into frozen state @@ -1685,6 +1686,12 @@ static int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs) } #endif /* CONFIG_PCI_IOV */ +static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb, + struct pnv_ioda_pe *pe); + +static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, + struct pnv_ioda_pe *pe); + static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev) { struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); @@ -1713,6 +1720,24 @@ static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev) pci_info(pdev, "Added to existing PE#%x\n", pe->pe_number); } + /* + * We assume that bridges *probably* don't need to do any DMA so we can + * skip allocating a TCE table, etc unless we get a non-bridge device. + */ + if (!pe->dma_setup_done && !pci_is_bridge(pdev)) { + switch (phb->type) { + case PNV_PHB_IODA1: + pnv_pci_ioda1_setup_dma_pe(phb, pe); + break; + case PNV_PHB_IODA2: + pnv_pci_ioda2_setup_dma_pe(phb, pe); + break; + default: + pr_warn("%s: No DMA for PHB#%x (type %d)\n", + __func__, phb->hose->global_number, phb->type); + } + } + if (pdn) pdn->pe_number = pe->pe_number; pe->device_count++; @@ -2222,6 +2247,7 @@ found: pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift; iommu_init_table(tbl, phb->hose->node, 0, 0); + pe->dma_setup_done = true; return; fail: /* XXX Failure: Try to fallback to 64-bit only ? 
*/ @@ -2536,9 +2562,6 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, { int64_t rc; - if (!pnv_pci_ioda_pe_dma_weight(pe)) - return; - /* TVE #1 is selected by PCI address bit 59 */ pe->tce_bypass_base = 1ull << 59; @@ -2563,6 +2586,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, iommu_register_group(&pe->table_group, phb->hose->global_number, pe->pe_number); #endif + pe->dma_setup_done = true; } int64_t pnv_opal_pci_msi_eoi(struct irq_chip *chip, unsigned int hw_irq) @@ -3136,7 +3160,6 @@ static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus, static void pnv_pci_configure_bus(struct pci_bus *bus) { - struct pnv_phb *phb = pci_bus_to_pnvhb(bus); struct pci_dev *bridge = bus->self; struct pnv_ioda_pe *pe; bool all = (bridge && pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE); @@ -3160,17 +3183,6 @@ static void pnv_pci_configure_bus(struct pci_bus *bus) return; pnv_ioda_setup_pe_seg(pe); - switch (phb->type) { - case PNV_PHB_IODA1: - pnv_pci_ioda1_setup_dma_pe(phb, pe); - break; - case PNV_PHB_IODA2: - pnv_pci_ioda2_setup_dma_pe(phb, pe); - break; - default: - pr_warn("%s: No DMA for PHB#%x (type %d)\n", - __func__, phb->hose->global_number, phb->type); - } } static resource_size_t pnv_pci_default_alignment(void) @@ -3289,11 +3301,10 @@ static long pnv_pci_ioda1_unset_window(struct iommu_table_group *table_group, static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe) { - unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe); struct iommu_table *tbl = pe->table_group.tables[0]; int64_t rc; - if (!weight) + if (!pe->dma_setup_done) return; rc = pnv_pci_ioda1_unset_window(&pe->table_group, 0); @@ -3313,10 +3324,9 @@ static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe) static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe) { struct iommu_table *tbl = pe->table_group.tables[0]; - unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe); int64_t rc; - if (!weight) + if (pe->dma_setup_done) return; rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0); diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 0727dec9a0d1..6aa6aefb637d 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -87,6 +87,13 @@ struct pnv_ioda_pe { bool tce_bypass_enabled; uint64_t tce_bypass_base; + /* + * Used to track whether we've done DMA setup for this PE or not. We + * want to defer allocating TCE tables, etc until we've added a + * non-bridge device to the PE. + */ + bool dma_setup_done; + /* MSIs. MVE index is identical for for 32 and 64 bit MSI * and -1 if not supported. (It's actually identical to the * PE number) -- cgit v1.2.3 From 369633654fcb9639cd4cd0e1a448ffde3533d776 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 22 Jul 2020 16:57:03 +1000 Subject: powerpc/powernv/pci: Initialise M64 for IODA1 as a 1-1 window We pre-configure the m64 window for IODA1 as a 1-1 segment-PE mapping, similar to PHB3. Currently the actual mapping of segments occurs in pnv_ioda_pick_m64_pe(), but we can move it into pnv_ioda1_init_m64() and drop the IODA1 specific code paths in the PE setup / teardown. 
Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200722065715.1432738-4-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-ioda.c | 55 ++++++++++++++----------------- 1 file changed, 25 insertions(+), 30 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index bb9c1cc60c33..8fb17676d914 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -311,6 +311,28 @@ static int pnv_ioda1_init_m64(struct pnv_phb *phb) } } + for (index = 0; index < phb->ioda.total_pe_num; index++) { + int64_t rc; + + /* + * P7IOC supports M64DT, which helps mapping M64 segment + * to one particular PE#. However, PHB3 has fixed mapping + * between M64 segment and PE#. In order to have same logic + * for P7IOC and PHB3, we enforce fixed mapping between M64 + * segment and PE# on P7IOC. + */ + rc = opal_pci_map_pe_mmio_window(phb->opal_id, + index, OPAL_M64_WINDOW_TYPE, + index / PNV_IODA1_M64_SEGS, + index % PNV_IODA1_M64_SEGS); + if (rc != OPAL_SUCCESS) { + pr_warn("%s: Error %lld mapping M64 for PHB#%x-PE#%x\n", + __func__, rc, phb->hose->global_number, + index); + goto fail; + } + } + /* * Exclude the segments for reserved and root bus PE, which * are first or last two PEs. @@ -402,26 +424,6 @@ static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all) pe->master = master_pe; list_add_tail(&pe->list, &master_pe->slaves); } - - /* - * P7IOC supports M64DT, which helps mapping M64 segment - * to one particular PE#. However, PHB3 has fixed mapping - * between M64 segment and PE#. In order to have same logic - * for P7IOC and PHB3, we enforce fixed mapping between M64 - * segment and PE# on P7IOC. - */ - if (phb->type == PNV_PHB_IODA1) { - int64_t rc; - - rc = opal_pci_map_pe_mmio_window(phb->opal_id, - pe->pe_number, OPAL_M64_WINDOW_TYPE, - pe->pe_number / PNV_IODA1_M64_SEGS, - pe->pe_number % PNV_IODA1_M64_SEGS); - if (rc != OPAL_SUCCESS) - pr_warn("%s: Error %lld mapping M64 for PHB#%x-PE#%x\n", - __func__, rc, phb->hose->global_number, - pe->pe_number); - } } kfree(pe_alloc); @@ -3354,14 +3356,8 @@ static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe, if (map[idx] != pe->pe_number) continue; - if (win == OPAL_M64_WINDOW_TYPE) - rc = opal_pci_map_pe_mmio_window(phb->opal_id, - phb->ioda.reserved_pe_idx, win, - idx / PNV_IODA1_M64_SEGS, - idx % PNV_IODA1_M64_SEGS); - else - rc = opal_pci_map_pe_mmio_window(phb->opal_id, - phb->ioda.reserved_pe_idx, win, 0, idx); + rc = opal_pci_map_pe_mmio_window(phb->opal_id, + phb->ioda.reserved_pe_idx, win, 0, idx); if (rc != OPAL_SUCCESS) pe_warn(pe, "Error %lld unmapping (%d) segment#%d\n", @@ -3380,8 +3376,7 @@ static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe) phb->ioda.io_segmap); pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE, phb->ioda.m32_segmap); - pnv_ioda_free_pe_seg(pe, OPAL_M64_WINDOW_TYPE, - phb->ioda.m64_segmap); + /* M64 is pre-configured by pnv_ioda1_init_m64() */ } else if (phb->type == PNV_PHB_IODA2) { pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE, phb->ioda.m32_segmap); -- cgit v1.2.3 From 37b59ef08c546c6f54cdc52eed749f494619a102 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 22 Jul 2020 16:57:04 +1000 Subject: powerpc/powernv/sriov: Move SR-IOV into a separate file pci-ioda.c is getting a bit unwieldly due to the amount of stuff jammed in there. 
The SR-IOV support can be extracted easily enough and is mostly standalone, so move it into a separate file. This patch also moves the PowerNV SR-IOV specific fields from pci_dn and moves them into a platform specific structure. I'm not sure how they ended up in there in the first place, but leaking platform specifics into common code has proven to be a terrible idea so far so lets stop doing that. Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200722065715.1432738-5-oohall@gmail.com --- arch/powerpc/include/asm/device.h | 3 + arch/powerpc/platforms/powernv/Makefile | 1 + arch/powerpc/platforms/powernv/pci-ioda.c | 673 +---------------------------- arch/powerpc/platforms/powernv/pci-sriov.c | 640 +++++++++++++++++++++++++++ arch/powerpc/platforms/powernv/pci.h | 74 ++++ 5 files changed, 736 insertions(+), 655 deletions(-) create mode 100644 arch/powerpc/platforms/powernv/pci-sriov.c diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h index 266542769e4b..4d8934db7ef5 100644 --- a/arch/powerpc/include/asm/device.h +++ b/arch/powerpc/include/asm/device.h @@ -49,6 +49,9 @@ struct dev_archdata { #ifdef CONFIG_CXL_BASE struct cxl_context *cxl_ctx; #endif +#ifdef CONFIG_PCI_IOV + void *iov_data; +#endif }; struct pdev_archdata { diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile index fe3f0fb5aeca..2eb6ae150d1f 100644 --- a/arch/powerpc/platforms/powernv/Makefile +++ b/arch/powerpc/platforms/powernv/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_FA_DUMP) += opal-fadump.o obj-$(CONFIG_PRESERVE_FA_DUMP) += opal-fadump.o obj-$(CONFIG_OPAL_CORE) += opal-core.o obj-$(CONFIG_PCI) += pci.o pci-ioda.o npu-dma.o pci-ioda-tce.o +obj-$(CONFIG_PCI_IOV) += pci-sriov.o obj-$(CONFIG_CXL_BASE) += pci-cxl.o obj-$(CONFIG_EEH) += eeh-powernv.o obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 8fb17676d914..2d36a9ebf0e9 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -115,26 +115,6 @@ static int __init pci_reset_phbs_setup(char *str) early_param("ppc_pci_reset_phbs", pci_reset_phbs_setup); -static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r) -{ - /* - * WARNING: We cannot rely on the resource flags. The Linux PCI - * allocation code sometimes decides to put a 64-bit prefetchable - * BAR in the 32-bit window, so we have to compare the addresses. - * - * For simplicity we only test resource start. 
- */ - return (r->start >= phb->ioda.m64_base && - r->start < (phb->ioda.m64_base + phb->ioda.m64_size)); -} - -static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags) -{ - unsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH); - - return (resource_flags & flags) == flags; -} - static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no) { s64 rc; @@ -172,7 +152,7 @@ static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no) pnv_ioda_init_pe(phb, pe_no); } -static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb) +struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb) { long pe; @@ -184,7 +164,7 @@ static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb) return NULL; } -static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe) +void pnv_ioda_free_pe(struct pnv_ioda_pe *pe) { struct pnv_phb *phb = pe->phb; unsigned int pe_num = pe->pe_number; @@ -816,7 +796,7 @@ static void pnv_ioda_unset_peltv(struct pnv_phb *phb, pe_warn(pe, "OPAL error %lld remove self from PELTV\n", rc); } -static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) +int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) { struct pci_dev *parent; uint8_t bcomp, dcomp, fcomp; @@ -887,7 +867,7 @@ static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) return 0; } -static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) +int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) { struct pci_dev *parent; uint8_t bcomp, dcomp, fcomp; @@ -982,91 +962,6 @@ out: return 0; } -#ifdef CONFIG_PCI_IOV -static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset) -{ - struct pci_dn *pdn = pci_get_pdn(dev); - int i; - struct resource *res, res2; - resource_size_t size; - u16 num_vfs; - - if (!dev->is_physfn) - return -EINVAL; - - /* - * "offset" is in VFs. The M64 windows are sized so that when they - * are segmented, each segment is the same size as the IOV BAR. - * Each segment is in a separate PE, and the high order bits of the - * address are the PE number. Therefore, each VF's BAR is in a - * separate PE, and changing the IOV BAR start address changes the - * range of PEs the VFs are in. - */ - num_vfs = pdn->num_vfs; - for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { - res = &dev->resource[i + PCI_IOV_RESOURCES]; - if (!res->flags || !res->parent) - continue; - - /* - * The actual IOV BAR range is determined by the start address - * and the actual size for num_vfs VFs BAR. This check is to - * make sure that after shifting, the range will not overlap - * with another device. - */ - size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES); - res2.flags = res->flags; - res2.start = res->start + (size * offset); - res2.end = res2.start + (size * num_vfs) - 1; - - if (res2.end > res->end) { - dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n", - i, &res2, res, num_vfs, offset); - return -EBUSY; - } - } - - /* - * Since M64 BAR shares segments among all possible 256 PEs, - * we have to shift the beginning of PF IOV BAR to make it start from - * the segment which belongs to the PE number assigned to the first VF. - * This creates a "hole" in the /proc/iomem which could be used for - * allocating other resources so we reserve this area below and - * release when IOV is released. 
- */ - for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { - res = &dev->resource[i + PCI_IOV_RESOURCES]; - if (!res->flags || !res->parent) - continue; - - size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES); - res2 = *res; - res->start += size * offset; - - dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n", - i, &res2, res, (offset > 0) ? "En" : "Dis", - num_vfs, offset); - - if (offset < 0) { - devm_release_resource(&dev->dev, &pdn->holes[i]); - memset(&pdn->holes[i], 0, sizeof(pdn->holes[i])); - } - - pci_update_resource(dev, i + PCI_IOV_RESOURCES); - - if (offset > 0) { - pdn->holes[i].start = res2.start; - pdn->holes[i].end = res2.start + size * offset - 1; - pdn->holes[i].flags = IORESOURCE_BUS; - pdn->holes[i].name = "pnv_iov_reserved"; - devm_request_resource(&dev->dev, res->parent, - &pdn->holes[i]); - } - } - return 0; -} -#endif /* CONFIG_PCI_IOV */ - static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) { struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); @@ -1294,406 +1189,9 @@ static void pnv_pci_ioda_setup_nvlink(void) #endif } -#ifdef CONFIG_PCI_IOV -static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs) -{ - struct pnv_phb *phb; - struct pci_dn *pdn; - int i, j; - int m64_bars; - - phb = pci_bus_to_pnvhb(pdev->bus); - pdn = pci_get_pdn(pdev); - - if (pdn->m64_single_mode) - m64_bars = num_vfs; - else - m64_bars = 1; - - for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) - for (j = 0; j < m64_bars; j++) { - if (pdn->m64_map[j][i] == IODA_INVALID_M64) - continue; - opal_pci_phb_mmio_enable(phb->opal_id, - OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 0); - clear_bit(pdn->m64_map[j][i], &phb->ioda.m64_bar_alloc); - pdn->m64_map[j][i] = IODA_INVALID_M64; - } - - kfree(pdn->m64_map); - return 0; -} - -static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) -{ - struct pnv_phb *phb; - struct pci_dn *pdn; - unsigned int win; - struct resource *res; - int i, j; - int64_t rc; - int total_vfs; - resource_size_t size, start; - int pe_num; - int m64_bars; - - phb = pci_bus_to_pnvhb(pdev->bus); - pdn = pci_get_pdn(pdev); - total_vfs = pci_sriov_get_totalvfs(pdev); - - if (pdn->m64_single_mode) - m64_bars = num_vfs; - else - m64_bars = 1; - - pdn->m64_map = kmalloc_array(m64_bars, - sizeof(*pdn->m64_map), - GFP_KERNEL); - if (!pdn->m64_map) - return -ENOMEM; - /* Initialize the m64_map to IODA_INVALID_M64 */ - for (i = 0; i < m64_bars ; i++) - for (j = 0; j < PCI_SRIOV_NUM_BARS; j++) - pdn->m64_map[i][j] = IODA_INVALID_M64; - - - for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { - res = &pdev->resource[i + PCI_IOV_RESOURCES]; - if (!res->flags || !res->parent) - continue; - - for (j = 0; j < m64_bars; j++) { - do { - win = find_next_zero_bit(&phb->ioda.m64_bar_alloc, - phb->ioda.m64_bar_idx + 1, 0); - - if (win >= phb->ioda.m64_bar_idx + 1) - goto m64_failed; - } while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc)); - - pdn->m64_map[j][i] = win; - - if (pdn->m64_single_mode) { - size = pci_iov_resource_size(pdev, - PCI_IOV_RESOURCES + i); - start = res->start + size * j; - } else { - size = resource_size(res); - start = res->start; - } - - /* Map the M64 here */ - if (pdn->m64_single_mode) { - pe_num = pdn->pe_num_map[j]; - rc = opal_pci_map_pe_mmio_window(phb->opal_id, - pe_num, OPAL_M64_WINDOW_TYPE, - pdn->m64_map[j][i], 0); - } - - rc = opal_pci_set_phb_mem_window(phb->opal_id, - OPAL_M64_WINDOW_TYPE, - pdn->m64_map[j][i], - start, - 0, /* unused */ - size); - - - if (rc != OPAL_SUCCESS) { - dev_err(&pdev->dev, "Failed to map M64 
window #%d: %lld\n", - win, rc); - goto m64_failed; - } - - if (pdn->m64_single_mode) - rc = opal_pci_phb_mmio_enable(phb->opal_id, - OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 2); - else - rc = opal_pci_phb_mmio_enable(phb->opal_id, - OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 1); - - if (rc != OPAL_SUCCESS) { - dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n", - win, rc); - goto m64_failed; - } - } - } - return 0; - -m64_failed: - pnv_pci_vf_release_m64(pdev, num_vfs); - return -EBUSY; -} - -static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe); - -static void pnv_ioda_release_vf_PE(struct pci_dev *pdev) -{ - struct pnv_phb *phb; - struct pnv_ioda_pe *pe, *pe_n; - struct pci_dn *pdn; - - phb = pci_bus_to_pnvhb(pdev->bus); - pdn = pci_get_pdn(pdev); - - if (!pdev->is_physfn) - return; - - /* FIXME: Use pnv_ioda_release_pe()? */ - list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) { - if (pe->parent_dev != pdev) - continue; - - pnv_pci_ioda2_release_pe_dma(pe); - - /* Remove from list */ - mutex_lock(&phb->ioda.pe_list_mutex); - list_del(&pe->list); - mutex_unlock(&phb->ioda.pe_list_mutex); - - pnv_ioda_deconfigure_pe(phb, pe); - - pnv_ioda_free_pe(pe); - } -} - -static void pnv_pci_sriov_disable(struct pci_dev *pdev) -{ - struct pnv_phb *phb; - struct pnv_ioda_pe *pe; - struct pci_dn *pdn; - u16 num_vfs, i; - - phb = pci_bus_to_pnvhb(pdev->bus); - pdn = pci_get_pdn(pdev); - num_vfs = pdn->num_vfs; - - /* Release VF PEs */ - pnv_ioda_release_vf_PE(pdev); - - if (phb->type == PNV_PHB_IODA2) { - if (!pdn->m64_single_mode) - pnv_pci_vf_resource_shift(pdev, -*pdn->pe_num_map); - - /* Release M64 windows */ - pnv_pci_vf_release_m64(pdev, num_vfs); - - /* Release PE numbers */ - if (pdn->m64_single_mode) { - for (i = 0; i < num_vfs; i++) { - if (pdn->pe_num_map[i] == IODA_INVALID_PE) - continue; - - pe = &phb->ioda.pe_array[pdn->pe_num_map[i]]; - pnv_ioda_free_pe(pe); - } - } else - bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs); - /* Releasing pe_num_map */ - kfree(pdn->pe_num_map); - } -} - -static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, - struct pnv_ioda_pe *pe); -static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) -{ - struct pnv_phb *phb; - struct pnv_ioda_pe *pe; - int pe_num; - u16 vf_index; - struct pci_dn *pdn; - - phb = pci_bus_to_pnvhb(pdev->bus); - pdn = pci_get_pdn(pdev); - - if (!pdev->is_physfn) - return; - - /* Reserve PE for each VF */ - for (vf_index = 0; vf_index < num_vfs; vf_index++) { - int vf_devfn = pci_iov_virtfn_devfn(pdev, vf_index); - int vf_bus = pci_iov_virtfn_bus(pdev, vf_index); - struct pci_dn *vf_pdn; - - if (pdn->m64_single_mode) - pe_num = pdn->pe_num_map[vf_index]; - else - pe_num = *pdn->pe_num_map + vf_index; - - pe = &phb->ioda.pe_array[pe_num]; - pe->pe_number = pe_num; - pe->phb = phb; - pe->flags = PNV_IODA_PE_VF; - pe->pbus = NULL; - pe->parent_dev = pdev; - pe->mve_number = -1; - pe->rid = (vf_bus << 8) | vf_devfn; - - pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n", - pci_domain_nr(pdev->bus), pdev->bus->number, - PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num); - - if (pnv_ioda_configure_pe(phb, pe)) { - /* XXX What do we do here ? 
*/ - pnv_ioda_free_pe(pe); - pe->pdev = NULL; - continue; - } - - /* Put PE to the list */ - mutex_lock(&phb->ioda.pe_list_mutex); - list_add_tail(&pe->list, &phb->ioda.pe_list); - mutex_unlock(&phb->ioda.pe_list_mutex); - - /* associate this pe to it's pdn */ - list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) { - if (vf_pdn->busno == vf_bus && - vf_pdn->devfn == vf_devfn) { - vf_pdn->pe_number = pe_num; - break; - } - } - - pnv_pci_ioda2_setup_dma_pe(phb, pe); - } -} - -static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) -{ - struct pnv_phb *phb; - struct pnv_ioda_pe *pe; - struct pci_dn *pdn; - int ret; - u16 i; - - phb = pci_bus_to_pnvhb(pdev->bus); - pdn = pci_get_pdn(pdev); - - if (phb->type == PNV_PHB_IODA2) { - if (!pdn->vfs_expanded) { - dev_info(&pdev->dev, "don't support this SRIOV device" - " with non 64bit-prefetchable IOV BAR\n"); - return -ENOSPC; - } - - /* - * When M64 BARs functions in Single PE mode, the number of VFs - * could be enabled must be less than the number of M64 BARs. - */ - if (pdn->m64_single_mode && num_vfs > phb->ioda.m64_bar_idx) { - dev_info(&pdev->dev, "Not enough M64 BAR for VFs\n"); - return -EBUSY; - } - - /* Allocating pe_num_map */ - if (pdn->m64_single_mode) - pdn->pe_num_map = kmalloc_array(num_vfs, - sizeof(*pdn->pe_num_map), - GFP_KERNEL); - else - pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map), GFP_KERNEL); - - if (!pdn->pe_num_map) - return -ENOMEM; - - if (pdn->m64_single_mode) - for (i = 0; i < num_vfs; i++) - pdn->pe_num_map[i] = IODA_INVALID_PE; - - /* Calculate available PE for required VFs */ - if (pdn->m64_single_mode) { - for (i = 0; i < num_vfs; i++) { - pe = pnv_ioda_alloc_pe(phb); - if (!pe) { - ret = -EBUSY; - goto m64_failed; - } - - pdn->pe_num_map[i] = pe->pe_number; - } - } else { - mutex_lock(&phb->ioda.pe_alloc_mutex); - *pdn->pe_num_map = bitmap_find_next_zero_area( - phb->ioda.pe_alloc, phb->ioda.total_pe_num, - 0, num_vfs, 0); - if (*pdn->pe_num_map >= phb->ioda.total_pe_num) { - mutex_unlock(&phb->ioda.pe_alloc_mutex); - dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs); - kfree(pdn->pe_num_map); - return -EBUSY; - } - bitmap_set(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs); - mutex_unlock(&phb->ioda.pe_alloc_mutex); - } - pdn->num_vfs = num_vfs; - - /* Assign M64 window accordingly */ - ret = pnv_pci_vf_assign_m64(pdev, num_vfs); - if (ret) { - dev_info(&pdev->dev, "Not enough M64 window resources\n"); - goto m64_failed; - } - - /* - * When using one M64 BAR to map one IOV BAR, we need to shift - * the IOV BAR according to the PE# allocated to the VFs. - * Otherwise, the PE# for the VF will conflict with others. 
- */ - if (!pdn->m64_single_mode) { - ret = pnv_pci_vf_resource_shift(pdev, *pdn->pe_num_map); - if (ret) - goto m64_failed; - } - } - - /* Setup VF PEs */ - pnv_ioda_setup_vf_PE(pdev, num_vfs); - - return 0; - -m64_failed: - if (pdn->m64_single_mode) { - for (i = 0; i < num_vfs; i++) { - if (pdn->pe_num_map[i] == IODA_INVALID_PE) - continue; - - pe = &phb->ioda.pe_array[pdn->pe_num_map[i]]; - pnv_ioda_free_pe(pe); - } - } else - bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs); - - /* Releasing pe_num_map */ - kfree(pdn->pe_num_map); - - return ret; -} - -static int pnv_pcibios_sriov_disable(struct pci_dev *pdev) -{ - pnv_pci_sriov_disable(pdev); - - /* Release PCI data */ - remove_sriov_vf_pdns(pdev); - return 0; -} - -static int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs) -{ - /* Allocate PCI data */ - add_sriov_vf_pdns(pdev); - - return pnv_pci_sriov_enable(pdev, num_vfs); -} -#endif /* CONFIG_PCI_IOV */ - static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe); -static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, - struct pnv_ioda_pe *pe); - static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev) { struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); @@ -2559,8 +2057,8 @@ static struct iommu_table_group_ops pnv_pci_ioda2_ops = { }; #endif -static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, - struct pnv_ioda_pe *pe) +void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, + struct pnv_ioda_pe *pe) { int64_t rc; @@ -2737,117 +2235,6 @@ static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) count, phb->msi_base); } -#ifdef CONFIG_PCI_IOV -static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev) -{ - struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); - const resource_size_t gate = phb->ioda.m64_segsize >> 2; - struct resource *res; - int i; - resource_size_t size, total_vf_bar_sz; - struct pci_dn *pdn; - int mul, total_vfs; - - pdn = pci_get_pdn(pdev); - pdn->vfs_expanded = 0; - pdn->m64_single_mode = false; - - total_vfs = pci_sriov_get_totalvfs(pdev); - mul = phb->ioda.total_pe_num; - total_vf_bar_sz = 0; - - for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { - res = &pdev->resource[i + PCI_IOV_RESOURCES]; - if (!res->flags || res->parent) - continue; - if (!pnv_pci_is_m64_flags(res->flags)) { - dev_warn(&pdev->dev, "Don't support SR-IOV with" - " non M64 VF BAR%d: %pR. \n", - i, res); - goto truncate_iov; - } - - total_vf_bar_sz += pci_iov_resource_size(pdev, - i + PCI_IOV_RESOURCES); - - /* - * If bigger than quarter of M64 segment size, just round up - * power of two. - * - * Generally, one M64 BAR maps one IOV BAR. To avoid conflict - * with other devices, IOV BAR size is expanded to be - * (total_pe * VF_BAR_size). When VF_BAR_size is half of M64 - * segment size , the expanded size would equal to half of the - * whole M64 space size, which will exhaust the M64 Space and - * limit the system flexibility. This is a design decision to - * set the boundary to quarter of the M64 segment size. - */ - if (total_vf_bar_sz > gate) { - mul = roundup_pow_of_two(total_vfs); - dev_info(&pdev->dev, - "VF BAR Total IOV size %llx > %llx, roundup to %d VFs\n", - total_vf_bar_sz, gate, mul); - pdn->m64_single_mode = true; - break; - } - } - - for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { - res = &pdev->resource[i + PCI_IOV_RESOURCES]; - if (!res->flags || res->parent) - continue; - - size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES); - /* - * On PHB3, the minimum size alignment of M64 BAR in single - * mode is 32MB. 
- */ - if (pdn->m64_single_mode && (size < SZ_32M)) - goto truncate_iov; - dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res); - res->end = res->start + size * mul - 1; - dev_dbg(&pdev->dev, " %pR\n", res); - dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)", - i, res, mul); - } - pdn->vfs_expanded = mul; - - return; - -truncate_iov: - /* To save MMIO space, IOV BAR is truncated. */ - for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { - res = &pdev->resource[i + PCI_IOV_RESOURCES]; - res->flags = 0; - res->end = res->start - 1; - } -} - -static void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev) -{ - if (WARN_ON(pci_dev_is_added(pdev))) - return; - - if (pdev->is_virtfn) { - struct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev); - - /* - * VF PEs are single-device PEs so their pdev pointer needs to - * be set. The pdev doesn't exist when the PE is allocated (in - * (pcibios_sriov_enable()) so we fix it up here. - */ - pe->pdev = pdev; - WARN_ON(!(pe->flags & PNV_IODA_PE_VF)); - } else if (pdev->is_physfn) { - /* - * For PFs adjust their allocated IOV resources to match what - * the PHB can support using it's M64 BAR table. - */ - pnv_pci_ioda_fixup_iov_resources(pdev); - } -} -#endif /* CONFIG_PCI_IOV */ - static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe, struct resource *res) { @@ -3192,41 +2579,6 @@ static resource_size_t pnv_pci_default_alignment(void) return PAGE_SIZE; } -#ifdef CONFIG_PCI_IOV -static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev, - int resno) -{ - struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); - struct pci_dn *pdn = pci_get_pdn(pdev); - resource_size_t align; - - /* - * On PowerNV platform, IOV BAR is mapped by M64 BAR to enable the - * SR-IOV. While from hardware perspective, the range mapped by M64 - * BAR should be size aligned. - * - * When IOV BAR is mapped with M64 BAR in Single PE mode, the extra - * powernv-specific hardware restriction is gone. But if just use the - * VF BAR size as the alignment, PF BAR / VF BAR may be allocated with - * in one segment of M64 #15, which introduces the PE conflict between - * PF and VF. Based on this, the minimum alignment of an IOV BAR is - * m64_segsize. - * - * This function returns the total IOV BAR size if M64 BAR is in - * Shared PE mode or just VF BAR size if not. - * If the M64 BAR is in Single PE mode, return the VF BAR size or - * M64 segment size if IOV BAR size is less. - */ - align = pci_iov_resource_size(pdev, resno); - if (!pdn->vfs_expanded) - return align; - if (pdn->m64_single_mode) - return max(align, (resource_size_t)phb->ioda.m64_segsize); - - return pdn->vfs_expanded * align; -} -#endif /* CONFIG_PCI_IOV */ - /* Prevent enabling devices for which we couldn't properly * assign a PE */ @@ -3323,7 +2675,7 @@ static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe) iommu_tce_table_put(tbl); } -static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe) +void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe) { struct iommu_table *tbl = pe->table_group.tables[0]; int64_t rc; @@ -3436,12 +2788,23 @@ static void pnv_pci_release_device(struct pci_dev *pdev) struct pci_dn *pdn = pci_get_pdn(pdev); struct pnv_ioda_pe *pe; + /* The VF PE state is torn down when sriov_disable() is called */ if (pdev->is_virtfn) return; if (!pdn || pdn->pe_number == IODA_INVALID_PE) return; +#ifdef CONFIG_PCI_IOV + /* + * FIXME: Try move this to sriov_disable(). 
It's here since we allocate + * the iov state at probe time since we need to fiddle with the IOV + * resources. + */ + if (pdev->is_physfn) + kfree(pdev->dev.archdata.iov_data); +#endif + /* * PCI hotplug can happen as part of EEH error recovery. The @pdn * isn't removed and added afterwards in this scenario. We should diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c new file mode 100644 index 000000000000..c5b7e20286c6 --- /dev/null +++ b/arch/powerpc/platforms/powernv/pci-sriov.c @@ -0,0 +1,640 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include + +#include + +#include "pci.h" + +/* for pci_dev_is_added() */ +#include "../../../../drivers/pci/pci.h" + + +static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev) +{ + struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); + const resource_size_t gate = phb->ioda.m64_segsize >> 2; + struct resource *res; + int i; + resource_size_t size, total_vf_bar_sz; + struct pnv_iov_data *iov; + int mul, total_vfs; + + iov = kzalloc(sizeof(*iov), GFP_KERNEL); + if (!iov) + goto truncate_iov; + pdev->dev.archdata.iov_data = iov; + + total_vfs = pci_sriov_get_totalvfs(pdev); + mul = phb->ioda.total_pe_num; + total_vf_bar_sz = 0; + + for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { + res = &pdev->resource[i + PCI_IOV_RESOURCES]; + if (!res->flags || res->parent) + continue; + if (!pnv_pci_is_m64_flags(res->flags)) { + dev_warn(&pdev->dev, "Don't support SR-IOV with non M64 VF BAR%d: %pR. \n", + i, res); + goto truncate_iov; + } + + total_vf_bar_sz += pci_iov_resource_size(pdev, + i + PCI_IOV_RESOURCES); + + /* + * If bigger than quarter of M64 segment size, just round up + * power of two. + * + * Generally, one M64 BAR maps one IOV BAR. To avoid conflict + * with other devices, IOV BAR size is expanded to be + * (total_pe * VF_BAR_size). When VF_BAR_size is half of M64 + * segment size , the expanded size would equal to half of the + * whole M64 space size, which will exhaust the M64 Space and + * limit the system flexibility. This is a design decision to + * set the boundary to quarter of the M64 segment size. + */ + if (total_vf_bar_sz > gate) { + mul = roundup_pow_of_two(total_vfs); + dev_info(&pdev->dev, + "VF BAR Total IOV size %llx > %llx, roundup to %d VFs\n", + total_vf_bar_sz, gate, mul); + iov->m64_single_mode = true; + break; + } + } + + for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { + res = &pdev->resource[i + PCI_IOV_RESOURCES]; + if (!res->flags || res->parent) + continue; + + size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES); + /* + * On PHB3, the minimum size alignment of M64 BAR in single + * mode is 32MB. + */ + if (iov->m64_single_mode && (size < SZ_32M)) + goto truncate_iov; + dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res); + res->end = res->start + size * mul - 1; + dev_dbg(&pdev->dev, " %pR\n", res); + dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)", + i, res, mul); + } + iov->vfs_expanded = mul; + + return; + +truncate_iov: + /* To save MMIO space, IOV BAR is truncated. 
*/ + for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { + res = &pdev->resource[i + PCI_IOV_RESOURCES]; + res->flags = 0; + res->end = res->start - 1; + } + + pdev->dev.archdata.iov_data = NULL; + kfree(iov); +} + +void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev) +{ + if (WARN_ON(pci_dev_is_added(pdev))) + return; + + if (pdev->is_virtfn) { + struct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev); + + /* + * VF PEs are single-device PEs so their pdev pointer needs to + * be set. The pdev doesn't exist when the PE is allocated (in + * (pcibios_sriov_enable()) so we fix it up here. + */ + pe->pdev = pdev; + WARN_ON(!(pe->flags & PNV_IODA_PE_VF)); + } else if (pdev->is_physfn) { + /* + * For PFs adjust their allocated IOV resources to match what + * the PHB can support using it's M64 BAR table. + */ + pnv_pci_ioda_fixup_iov_resources(pdev); + } +} + +resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev, + int resno) +{ + struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); + struct pnv_iov_data *iov = pnv_iov_get(pdev); + resource_size_t align; + + /* + * On PowerNV platform, IOV BAR is mapped by M64 BAR to enable the + * SR-IOV. While from hardware perspective, the range mapped by M64 + * BAR should be size aligned. + * + * When IOV BAR is mapped with M64 BAR in Single PE mode, the extra + * powernv-specific hardware restriction is gone. But if just use the + * VF BAR size as the alignment, PF BAR / VF BAR may be allocated with + * in one segment of M64 #15, which introduces the PE conflict between + * PF and VF. Based on this, the minimum alignment of an IOV BAR is + * m64_segsize. + * + * This function returns the total IOV BAR size if M64 BAR is in + * Shared PE mode or just VF BAR size if not. + * If the M64 BAR is in Single PE mode, return the VF BAR size or + * M64 segment size if IOV BAR size is less. + */ + align = pci_iov_resource_size(pdev, resno); + + /* + * iov can be null if we have an SR-IOV device with IOV BAR that can't + * be placed in the m64 space (i.e. The BAR is 32bit or non-prefetch). + * In that case we don't allow VFs to be enabled so just return the + * default alignment. 
+ */ + if (!iov) + return align; + if (!iov->vfs_expanded) + return align; + if (iov->m64_single_mode) + return max(align, (resource_size_t)phb->ioda.m64_segsize); + + return iov->vfs_expanded * align; +} + +static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs) +{ + struct pnv_iov_data *iov; + struct pnv_phb *phb; + int i, j; + int m64_bars; + + phb = pci_bus_to_pnvhb(pdev->bus); + iov = pnv_iov_get(pdev); + + if (iov->m64_single_mode) + m64_bars = num_vfs; + else + m64_bars = 1; + + for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) + for (j = 0; j < m64_bars; j++) { + if (iov->m64_map[j][i] == IODA_INVALID_M64) + continue; + opal_pci_phb_mmio_enable(phb->opal_id, + OPAL_M64_WINDOW_TYPE, iov->m64_map[j][i], 0); + clear_bit(iov->m64_map[j][i], &phb->ioda.m64_bar_alloc); + iov->m64_map[j][i] = IODA_INVALID_M64; + } + + kfree(iov->m64_map); + return 0; +} + +static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) +{ + struct pnv_iov_data *iov; + struct pnv_phb *phb; + unsigned int win; + struct resource *res; + int i, j; + int64_t rc; + int total_vfs; + resource_size_t size, start; + int pe_num; + int m64_bars; + + phb = pci_bus_to_pnvhb(pdev->bus); + iov = pnv_iov_get(pdev); + total_vfs = pci_sriov_get_totalvfs(pdev); + + if (iov->m64_single_mode) + m64_bars = num_vfs; + else + m64_bars = 1; + + iov->m64_map = kmalloc_array(m64_bars, + sizeof(*iov->m64_map), + GFP_KERNEL); + if (!iov->m64_map) + return -ENOMEM; + /* Initialize the m64_map to IODA_INVALID_M64 */ + for (i = 0; i < m64_bars ; i++) + for (j = 0; j < PCI_SRIOV_NUM_BARS; j++) + iov->m64_map[i][j] = IODA_INVALID_M64; + + + for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { + res = &pdev->resource[i + PCI_IOV_RESOURCES]; + if (!res->flags || !res->parent) + continue; + + for (j = 0; j < m64_bars; j++) { + do { + win = find_next_zero_bit(&phb->ioda.m64_bar_alloc, + phb->ioda.m64_bar_idx + 1, 0); + + if (win >= phb->ioda.m64_bar_idx + 1) + goto m64_failed; + } while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc)); + + iov->m64_map[j][i] = win; + + if (iov->m64_single_mode) { + size = pci_iov_resource_size(pdev, + PCI_IOV_RESOURCES + i); + start = res->start + size * j; + } else { + size = resource_size(res); + start = res->start; + } + + /* Map the M64 here */ + if (iov->m64_single_mode) { + pe_num = iov->pe_num_map[j]; + rc = opal_pci_map_pe_mmio_window(phb->opal_id, + pe_num, OPAL_M64_WINDOW_TYPE, + iov->m64_map[j][i], 0); + } + + rc = opal_pci_set_phb_mem_window(phb->opal_id, + OPAL_M64_WINDOW_TYPE, + iov->m64_map[j][i], + start, + 0, /* unused */ + size); + + + if (rc != OPAL_SUCCESS) { + dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n", + win, rc); + goto m64_failed; + } + + if (iov->m64_single_mode) + rc = opal_pci_phb_mmio_enable(phb->opal_id, + OPAL_M64_WINDOW_TYPE, iov->m64_map[j][i], 2); + else + rc = opal_pci_phb_mmio_enable(phb->opal_id, + OPAL_M64_WINDOW_TYPE, iov->m64_map[j][i], 1); + + if (rc != OPAL_SUCCESS) { + dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n", + win, rc); + goto m64_failed; + } + } + } + return 0; + +m64_failed: + pnv_pci_vf_release_m64(pdev, num_vfs); + return -EBUSY; +} + +static void pnv_ioda_release_vf_PE(struct pci_dev *pdev) +{ + struct pnv_phb *phb; + struct pnv_ioda_pe *pe, *pe_n; + + phb = pci_bus_to_pnvhb(pdev->bus); + + if (!pdev->is_physfn) + return; + + /* FIXME: Use pnv_ioda_release_pe()? 
*/ + list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) { + if (pe->parent_dev != pdev) + continue; + + pnv_pci_ioda2_release_pe_dma(pe); + + /* Remove from list */ + mutex_lock(&phb->ioda.pe_list_mutex); + list_del(&pe->list); + mutex_unlock(&phb->ioda.pe_list_mutex); + + pnv_ioda_deconfigure_pe(phb, pe); + + pnv_ioda_free_pe(pe); + } +} + +static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset) +{ + struct resource *res, res2; + struct pnv_iov_data *iov; + resource_size_t size; + u16 num_vfs; + int i; + + if (!dev->is_physfn) + return -EINVAL; + iov = pnv_iov_get(dev); + + /* + * "offset" is in VFs. The M64 windows are sized so that when they + * are segmented, each segment is the same size as the IOV BAR. + * Each segment is in a separate PE, and the high order bits of the + * address are the PE number. Therefore, each VF's BAR is in a + * separate PE, and changing the IOV BAR start address changes the + * range of PEs the VFs are in. + */ + num_vfs = iov->num_vfs; + for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { + res = &dev->resource[i + PCI_IOV_RESOURCES]; + if (!res->flags || !res->parent) + continue; + + /* + * The actual IOV BAR range is determined by the start address + * and the actual size for num_vfs VFs BAR. This check is to + * make sure that after shifting, the range will not overlap + * with another device. + */ + size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES); + res2.flags = res->flags; + res2.start = res->start + (size * offset); + res2.end = res2.start + (size * num_vfs) - 1; + + if (res2.end > res->end) { + dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n", + i, &res2, res, num_vfs, offset); + return -EBUSY; + } + } + + /* + * Since M64 BAR shares segments among all possible 256 PEs, + * we have to shift the beginning of PF IOV BAR to make it start from + * the segment which belongs to the PE number assigned to the first VF. + * This creates a "hole" in the /proc/iomem which could be used for + * allocating other resources so we reserve this area below and + * release when IOV is released. + */ + for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { + res = &dev->resource[i + PCI_IOV_RESOURCES]; + if (!res->flags || !res->parent) + continue; + + size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES); + res2 = *res; + res->start += size * offset; + + dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n", + i, &res2, res, (offset > 0) ? 
"En" : "Dis", + num_vfs, offset); + + if (offset < 0) { + devm_release_resource(&dev->dev, &iov->holes[i]); + memset(&iov->holes[i], 0, sizeof(iov->holes[i])); + } + + pci_update_resource(dev, i + PCI_IOV_RESOURCES); + + if (offset > 0) { + iov->holes[i].start = res2.start; + iov->holes[i].end = res2.start + size * offset - 1; + iov->holes[i].flags = IORESOURCE_BUS; + iov->holes[i].name = "pnv_iov_reserved"; + devm_request_resource(&dev->dev, res->parent, + &iov->holes[i]); + } + } + return 0; +} + +static void pnv_pci_sriov_disable(struct pci_dev *pdev) +{ + struct pnv_phb *phb; + struct pnv_ioda_pe *pe; + struct pnv_iov_data *iov; + u16 num_vfs, i; + + phb = pci_bus_to_pnvhb(pdev->bus); + iov = pnv_iov_get(pdev); + num_vfs = iov->num_vfs; + + /* Release VF PEs */ + pnv_ioda_release_vf_PE(pdev); + + if (phb->type == PNV_PHB_IODA2) { + if (!iov->m64_single_mode) + pnv_pci_vf_resource_shift(pdev, -*iov->pe_num_map); + + /* Release M64 windows */ + pnv_pci_vf_release_m64(pdev, num_vfs); + + /* Release PE numbers */ + if (iov->m64_single_mode) { + for (i = 0; i < num_vfs; i++) { + if (iov->pe_num_map[i] == IODA_INVALID_PE) + continue; + + pe = &phb->ioda.pe_array[iov->pe_num_map[i]]; + pnv_ioda_free_pe(pe); + } + } else + bitmap_clear(phb->ioda.pe_alloc, *iov->pe_num_map, num_vfs); + /* Releasing pe_num_map */ + kfree(iov->pe_num_map); + } +} + +static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) +{ + struct pnv_phb *phb; + struct pnv_ioda_pe *pe; + int pe_num; + u16 vf_index; + struct pnv_iov_data *iov; + struct pci_dn *pdn; + + if (!pdev->is_physfn) + return; + + phb = pci_bus_to_pnvhb(pdev->bus); + pdn = pci_get_pdn(pdev); + iov = pnv_iov_get(pdev); + + /* Reserve PE for each VF */ + for (vf_index = 0; vf_index < num_vfs; vf_index++) { + int vf_devfn = pci_iov_virtfn_devfn(pdev, vf_index); + int vf_bus = pci_iov_virtfn_bus(pdev, vf_index); + struct pci_dn *vf_pdn; + + if (iov->m64_single_mode) + pe_num = iov->pe_num_map[vf_index]; + else + pe_num = *iov->pe_num_map + vf_index; + + pe = &phb->ioda.pe_array[pe_num]; + pe->pe_number = pe_num; + pe->phb = phb; + pe->flags = PNV_IODA_PE_VF; + pe->pbus = NULL; + pe->parent_dev = pdev; + pe->mve_number = -1; + pe->rid = (vf_bus << 8) | vf_devfn; + + pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n", + pci_domain_nr(pdev->bus), pdev->bus->number, + PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num); + + if (pnv_ioda_configure_pe(phb, pe)) { + /* XXX What do we do here ? */ + pnv_ioda_free_pe(pe); + pe->pdev = NULL; + continue; + } + + /* Put PE to the list */ + mutex_lock(&phb->ioda.pe_list_mutex); + list_add_tail(&pe->list, &phb->ioda.pe_list); + mutex_unlock(&phb->ioda.pe_list_mutex); + + /* associate this pe to it's pdn */ + list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) { + if (vf_pdn->busno == vf_bus && + vf_pdn->devfn == vf_devfn) { + vf_pdn->pe_number = pe_num; + break; + } + } + + pnv_pci_ioda2_setup_dma_pe(phb, pe); + } +} + +static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) +{ + struct pnv_iov_data *iov; + struct pnv_phb *phb; + struct pnv_ioda_pe *pe; + int ret; + u16 i; + + phb = pci_bus_to_pnvhb(pdev->bus); + iov = pnv_iov_get(pdev); + + if (phb->type == PNV_PHB_IODA2) { + if (!iov->vfs_expanded) { + dev_info(&pdev->dev, + "don't support this SRIOV device with non 64bit-prefetchable IOV BAR\n"); + return -ENOSPC; + } + + /* + * When M64 BARs functions in Single PE mode, the number of VFs + * could be enabled must be less than the number of M64 BARs. 
+ */ + if (iov->m64_single_mode && num_vfs > phb->ioda.m64_bar_idx) { + dev_info(&pdev->dev, "Not enough M64 BAR for VFs\n"); + return -EBUSY; + } + + /* Allocating pe_num_map */ + if (iov->m64_single_mode) + iov->pe_num_map = kmalloc_array(num_vfs, + sizeof(*iov->pe_num_map), + GFP_KERNEL); + else + iov->pe_num_map = kmalloc(sizeof(*iov->pe_num_map), GFP_KERNEL); + + if (!iov->pe_num_map) + return -ENOMEM; + + if (iov->m64_single_mode) + for (i = 0; i < num_vfs; i++) + iov->pe_num_map[i] = IODA_INVALID_PE; + + /* Calculate available PE for required VFs */ + if (iov->m64_single_mode) { + for (i = 0; i < num_vfs; i++) { + pe = pnv_ioda_alloc_pe(phb); + if (!pe) { + ret = -EBUSY; + goto m64_failed; + } + + iov->pe_num_map[i] = pe->pe_number; + } + } else { + mutex_lock(&phb->ioda.pe_alloc_mutex); + *iov->pe_num_map = bitmap_find_next_zero_area( + phb->ioda.pe_alloc, phb->ioda.total_pe_num, + 0, num_vfs, 0); + if (*iov->pe_num_map >= phb->ioda.total_pe_num) { + mutex_unlock(&phb->ioda.pe_alloc_mutex); + dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs); + kfree(iov->pe_num_map); + return -EBUSY; + } + bitmap_set(phb->ioda.pe_alloc, *iov->pe_num_map, num_vfs); + mutex_unlock(&phb->ioda.pe_alloc_mutex); + } + iov->num_vfs = num_vfs; + + /* Assign M64 window accordingly */ + ret = pnv_pci_vf_assign_m64(pdev, num_vfs); + if (ret) { + dev_info(&pdev->dev, "Not enough M64 window resources\n"); + goto m64_failed; + } + + /* + * When using one M64 BAR to map one IOV BAR, we need to shift + * the IOV BAR according to the PE# allocated to the VFs. + * Otherwise, the PE# for the VF will conflict with others. + */ + if (!iov->m64_single_mode) { + ret = pnv_pci_vf_resource_shift(pdev, *iov->pe_num_map); + if (ret) + goto m64_failed; + } + } + + /* Setup VF PEs */ + pnv_ioda_setup_vf_PE(pdev, num_vfs); + + return 0; + +m64_failed: + if (iov->m64_single_mode) { + for (i = 0; i < num_vfs; i++) { + if (iov->pe_num_map[i] == IODA_INVALID_PE) + continue; + + pe = &phb->ioda.pe_array[iov->pe_num_map[i]]; + pnv_ioda_free_pe(pe); + } + } else + bitmap_clear(phb->ioda.pe_alloc, *iov->pe_num_map, num_vfs); + + /* Releasing pe_num_map */ + kfree(iov->pe_num_map); + + return ret; +} + +int pnv_pcibios_sriov_disable(struct pci_dev *pdev) +{ + pnv_pci_sriov_disable(pdev); + + /* Release PCI data */ + remove_sriov_vf_pdns(pdev); + return 0; +} + +int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs) +{ + /* Allocate PCI data */ + add_sriov_vf_pdns(pdev); + + return pnv_pci_sriov_enable(pdev, num_vfs); +} diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 6aa6aefb637d..0156d7d17f7d 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -194,6 +194,80 @@ struct pnv_phb { u8 *diag_data; }; + +/* IODA PE management */ + +static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r) +{ + /* + * WARNING: We cannot rely on the resource flags. The Linux PCI + * allocation code sometimes decides to put a 64-bit prefetchable + * BAR in the 32-bit window, so we have to compare the addresses. + * + * For simplicity we only test resource start. 
+ */ + return (r->start >= phb->ioda.m64_base && + r->start < (phb->ioda.m64_base + phb->ioda.m64_size)); +} + +static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags) +{ + unsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH); + + return (resource_flags & flags) == flags; +} + +int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe); +int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe); + +void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe); +void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe); + +struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb); +void pnv_ioda_free_pe(struct pnv_ioda_pe *pe); + +#ifdef CONFIG_PCI_IOV +/* + * For SR-IOV we want to put each VF's MMIO resource in to a separate PE. + * This requires a bit of acrobatics with the MMIO -> PE configuration + * and this structure is used to keep track of it all. + */ +struct pnv_iov_data { + /* number of VFs IOV BAR expanded. FIXME: rename this to something less bad */ + u16 vfs_expanded; + + /* number of VFs enabled */ + u16 num_vfs; + unsigned int *pe_num_map; /* PE# for the first VF PE or array */ + + /* Did we map the VF BARs with single-PE IODA BARs? */ + bool m64_single_mode; + + int (*m64_map)[PCI_SRIOV_NUM_BARS]; +#define IODA_INVALID_M64 (-1) + + /* + * If we map the SR-IOV BARs with a segmented window then + * parts of that window will be "claimed" by other PEs. + * + * "holes" here is used to reserve the leading portion + * of the window that is used by other (non VF) PEs. + */ + struct resource holes[PCI_SRIOV_NUM_BARS]; +}; + +static inline struct pnv_iov_data *pnv_iov_get(struct pci_dev *pdev) +{ + return pdev->dev.archdata.iov_data; +} + +void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev); +resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev, int resno); + +int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs); +int pnv_pcibios_sriov_disable(struct pci_dev *pdev); +#endif /* CONFIG_PCI_IOV */ + extern struct pci_ops pnv_pci_ops; void pnv_pci_dump_phb_diag_data(struct pci_controller *hose, -- cgit v1.2.3 From ff79e11af0979b25ecb38e4c843779d4a759a4e2 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 22 Jul 2020 16:57:05 +1000 Subject: powerpc/powernv/sriov: Explain how SR-IOV works on PowerNV SR-IOV support on PowerNV is a byzantine maze of hooks. I have no idea how anyone is supposed to know how it works except through a lot of stuffering. Write up some docs about the overall story to help out the next sucker^Wperson who needs to tinker with it. Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200722065715.1432738-6-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-sriov.c | 130 +++++++++++++++++++++++++++++ 1 file changed, 130 insertions(+) diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c index c5b7e20286c6..8f5f0f50281e 100644 --- a/arch/powerpc/platforms/powernv/pci-sriov.c +++ b/arch/powerpc/platforms/powernv/pci-sriov.c @@ -12,6 +12,136 @@ /* for pci_dev_is_added() */ #include "../../../../drivers/pci/pci.h" +/* + * The majority of the complexity in supporting SR-IOV on PowerNV comes from + * the need to put the MMIO space for each VF into a separate PE. Internally + * the PHB maps MMIO addresses to a specific PE using the "Memory BAR Table". 
+ * The MBT historically only applied to the 64bit MMIO window of the PHB + * so it's common to see it referred to as the "M64BT". + * + * An MBT entry stores the mapped range as an , pair. This forces + * the address range that we want to map to be power-of-two sized and aligned. + * For conventional PCI devices this isn't really an issue since PCI device BARs + * have the same requirement. + * + * For a SR-IOV BAR things are a little more awkward since size and alignment + * are not coupled. The alignment is set based on the the per-VF BAR size, but + * the total BAR area is: number-of-vfs * per-vf-size. The number of VFs + * isn't necessarily a power of two, so neither is the total size. To fix that + * we need to finesse (read: hack) the Linux BAR allocator so that it will + * allocate the SR-IOV BARs in a way that lets us map them using the MBT. + * + * The changes to size and alignment that we need to do depend on the "mode" + * of MBT entry that we use. We only support SR-IOV on PHB3 (IODA2) and above, + * so as a baseline we can assume that we have the following BAR modes + * available: + * + * NB: $PE_COUNT is the number of PEs that the PHB supports. + * + * a) A segmented BAR that splits the mapped range into $PE_COUNT equally sized + * segments. The n'th segment is mapped to the n'th PE. + * b) An un-segmented BAR that maps the whole address range to a specific PE. + * + * + * We prefer to use mode a) since it only requires one MBT entry per SR-IOV BAR + * For comparison b) requires one entry per-VF per-BAR, or: + * (num-vfs * num-sriov-bars) in total. To use a) we need the size of each segment + * to equal the size of the per-VF BAR area. So: + * + * new_size = per-vf-size * number-of-PEs + * + * The alignment for the SR-IOV BAR also needs to be changed from per-vf-size + * to "new_size", calculated above. Implementing this is a convoluted process + * which requires several hooks in the PCI core: + * + * 1. In pcibios_add_device() we call pnv_pci_ioda_fixup_iov(). + * + * At this point the device has been probed and the device's BARs are sized, + * but no resource allocations have been done. The SR-IOV BARs are sized + * based on the maximum number of VFs supported by the device and we need + * to increase that to new_size. + * + * 2. Later, when Linux actually assigns resources it tries to make the resource + * allocations for each PCI bus as compact as possible. As a part of that it + * sorts the BARs on a bus by their required alignment, which is calculated + * using pci_resource_alignment(). + * + * For IOV resources this goes: + * pci_resource_alignment() + * pci_sriov_resource_alignment() + * pcibios_sriov_resource_alignment() + * pnv_pci_iov_resource_alignment() + * + * Our hook overrides the default alignment, equal to the per-vf-size, with + * new_size computed above. + * + * 3. When userspace enables VFs for a device: + * + * sriov_enable() + * pcibios_sriov_enable() + * pnv_pcibios_sriov_enable() + * + * This is where we actually allocate PE numbers for each VF and setup the + * MBT mapping for each SR-IOV BAR. In steps 1) and 2) we setup an "arena" + * where each MBT segment is equal in size to the VF BAR so we can shift + * around the actual SR-IOV BAR location within this arena. We need this + * ability because the PE space is shared by all devices on the same PHB. + * When using mode a) described above segment 0 in maps to PE#0 which might + * be already being used by another device on the PHB. 
+ * + * As a result we need allocate a contigious range of PE numbers, then shift + * the address programmed into the SR-IOV BAR of the PF so that the address + * of VF0 matches up with the segment corresponding to the first allocated + * PE number. This is handled in pnv_pci_vf_resource_shift(). + * + * Once all that is done we return to the PCI core which then enables VFs, + * scans them and creates pci_devs for each. The init process for a VF is + * largely the same as a normal device, but the VF is inserted into the IODA + * PE that we allocated for it rather than the PE associated with the bus. + * + * 4. When userspace disables VFs we unwind the above in + * pnv_pcibios_sriov_disable(). Fortunately this is relatively simple since + * we don't need to validate anything, just tear down the mappings and + * move SR-IOV resource back to its "proper" location. + * + * That's how mode a) works. In theory mode b) (single PE mapping) is less work + * since we can map each individual VF with a separate BAR. However, there's a + * few limitations: + * + * 1) For IODA2 mode b) has a minimum alignment requirement of 32MB. This makes + * it only usable for devices with very large per-VF BARs. Such devices are + * similar to Big Foot. They definitely exist, but I've never seen one. + * + * 2) The number of MBT entries that we have is limited. PHB3 and PHB4 only + * 16 total and some are needed for. Most SR-IOV capable network cards can support + * more than 16 VFs on each port. + * + * We use b) when using a) would use more than 1/4 of the entire 64 bit MMIO + * window of the PHB. + * + * + * + * PHB4 (IODA3) added a few new features that would be useful for SR-IOV. It + * allowed the MBT to map 32bit MMIO space in addition to 64bit which allows + * us to support SR-IOV BARs in the 32bit MMIO window. This is useful since + * the Linux BAR allocation will place any BAR marked as non-prefetchable into + * the non-prefetchable bridge window, which is 32bit only. It also added two + * new modes: + * + * c) A segmented BAR similar to a), but each segment can be individually + * mapped to any PE. This is matches how the 32bit MMIO window worked on + * IODA1&2. + * + * d) A segmented BAR with 8, 64, or 128 segments. This works similarly to a), + * but with fewer segments and configurable base PE. + * + * i.e. The n'th segment maps to the (n + base)'th PE. + * + * The base PE is also required to be a multiple of the window size. + * + * Unfortunately, the OPAL API doesn't currently (as of skiboot v6.6) allow us + * to exploit any of the IODA3 features. + */ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev) { -- cgit v1.2.3 From fac248f8119170e3f8f54900985498ff6ee560bf Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 22 Jul 2020 16:57:06 +1000 Subject: powerpc/powernv/sriov: Rename truncate_iov This prevents SR-IOV being used by making the SR-IOV BAR resources unallocatable. Rename it to reflect what it actually does. 
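The "unallocatable" trick relies on giving each IOV BAR an empty range so the PCI core can never assign or enable it. Roughly, mirroring the error path already present in pnv_pci_ioda_fixup_iov_resources() (names unchanged from the code below):

	/* Sketch of the disable path: an IOV BAR with no flags and zero size
	 * is skipped by the resource allocator, so VFs can never be enabled. */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		res->flags = 0;
		res->end = res->start - 1;	/* resource_size(res) == 0 */
	}
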
Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200722065715.1432738-7-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-sriov.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c index 8f5f0f50281e..10ebdf05aeb8 100644 --- a/arch/powerpc/platforms/powernv/pci-sriov.c +++ b/arch/powerpc/platforms/powernv/pci-sriov.c @@ -155,7 +155,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev) iov = kzalloc(sizeof(*iov), GFP_KERNEL); if (!iov) - goto truncate_iov; + goto disable_iov; pdev->dev.archdata.iov_data = iov; total_vfs = pci_sriov_get_totalvfs(pdev); @@ -169,7 +169,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev) if (!pnv_pci_is_m64_flags(res->flags)) { dev_warn(&pdev->dev, "Don't support SR-IOV with non M64 VF BAR%d: %pR. \n", i, res); - goto truncate_iov; + goto disable_iov; } total_vf_bar_sz += pci_iov_resource_size(pdev, @@ -208,7 +208,8 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev) * mode is 32MB. */ if (iov->m64_single_mode && (size < SZ_32M)) - goto truncate_iov; + goto disable_iov; + dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res); res->end = res->start + size * mul - 1; dev_dbg(&pdev->dev, " %pR\n", res); @@ -219,8 +220,8 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev) return; -truncate_iov: - /* To save MMIO space, IOV BAR is truncated. */ +disable_iov: + /* Save ourselves some MMIO space by disabling the unusable BARs */ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { res = &pdev->resource[i + PCI_IOV_RESOURCES]; res->flags = 0; -- cgit v1.2.3 From ad9add529d99d195195c27abf99e42d4965d35e2 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 22 Jul 2020 16:57:07 +1000 Subject: powerpc/powernv/sriov: Simplify used window tracking No need for the multi-dimensional arrays, just use a bitmap. 
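As a quick illustration of the change, the per-device window bookkeeping goes from a (windows x BARs) integer array to a single bitmap. The field and constant names here are the ones introduced by the hunks that follow:

	/* One bit per PHB M64 window used by this device's SR-IOV BARs */
	DECLARE_BITMAP(used_m64_bar_mask, MAX_M64_BARS);

	/* allocation side: remember which window was claimed */
	set_bit(win, iov->used_m64_bar_mask);

	/* release side: walk the used windows and hand them back */
	for_each_set_bit(window_id, iov->used_m64_bar_mask, MAX_M64_BARS) {
		opal_pci_phb_mmio_enable(phb->opal_id, OPAL_M64_WINDOW_TYPE,
					 window_id, 0);
		clear_bit(window_id, &phb->ioda.m64_bar_alloc);
	}
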
Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200722065715.1432738-8-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-sriov.c | 50 ++++++++++-------------------- arch/powerpc/platforms/powernv/pci.h | 8 +++-- 2 files changed, 22 insertions(+), 36 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c index 10ebdf05aeb8..261df2307175 100644 --- a/arch/powerpc/platforms/powernv/pci-sriov.c +++ b/arch/powerpc/platforms/powernv/pci-sriov.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0-or-later #include #include @@ -302,28 +302,20 @@ static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs) { struct pnv_iov_data *iov; struct pnv_phb *phb; - int i, j; - int m64_bars; + int window_id; phb = pci_bus_to_pnvhb(pdev->bus); iov = pnv_iov_get(pdev); - if (iov->m64_single_mode) - m64_bars = num_vfs; - else - m64_bars = 1; + for_each_set_bit(window_id, iov->used_m64_bar_mask, MAX_M64_BARS) { + opal_pci_phb_mmio_enable(phb->opal_id, + OPAL_M64_WINDOW_TYPE, + window_id, + 0); - for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) - for (j = 0; j < m64_bars; j++) { - if (iov->m64_map[j][i] == IODA_INVALID_M64) - continue; - opal_pci_phb_mmio_enable(phb->opal_id, - OPAL_M64_WINDOW_TYPE, iov->m64_map[j][i], 0); - clear_bit(iov->m64_map[j][i], &phb->ioda.m64_bar_alloc); - iov->m64_map[j][i] = IODA_INVALID_M64; - } + clear_bit(window_id, &phb->ioda.m64_bar_alloc); + } - kfree(iov->m64_map); return 0; } @@ -349,23 +341,14 @@ static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) else m64_bars = 1; - iov->m64_map = kmalloc_array(m64_bars, - sizeof(*iov->m64_map), - GFP_KERNEL); - if (!iov->m64_map) - return -ENOMEM; - /* Initialize the m64_map to IODA_INVALID_M64 */ - for (i = 0; i < m64_bars ; i++) - for (j = 0; j < PCI_SRIOV_NUM_BARS; j++) - iov->m64_map[i][j] = IODA_INVALID_M64; - - for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { res = &pdev->resource[i + PCI_IOV_RESOURCES]; if (!res->flags || !res->parent) continue; for (j = 0; j < m64_bars; j++) { + + /* allocate a window ID for this BAR */ do { win = find_next_zero_bit(&phb->ioda.m64_bar_alloc, phb->ioda.m64_bar_idx + 1, 0); @@ -373,8 +356,7 @@ static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) if (win >= phb->ioda.m64_bar_idx + 1) goto m64_failed; } while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc)); - - iov->m64_map[j][i] = win; + set_bit(win, iov->used_m64_bar_mask); if (iov->m64_single_mode) { size = pci_iov_resource_size(pdev, @@ -390,12 +372,12 @@ static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) pe_num = iov->pe_num_map[j]; rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe_num, OPAL_M64_WINDOW_TYPE, - iov->m64_map[j][i], 0); + win, 0); } rc = opal_pci_set_phb_mem_window(phb->opal_id, OPAL_M64_WINDOW_TYPE, - iov->m64_map[j][i], + win, start, 0, /* unused */ size); @@ -409,10 +391,10 @@ static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) if (iov->m64_single_mode) rc = opal_pci_phb_mmio_enable(phb->opal_id, - OPAL_M64_WINDOW_TYPE, iov->m64_map[j][i], 2); + OPAL_M64_WINDOW_TYPE, win, 2); else rc = opal_pci_phb_mmio_enable(phb->opal_id, - OPAL_M64_WINDOW_TYPE, iov->m64_map[j][i], 1); + OPAL_M64_WINDOW_TYPE, win, 1); if (rc != OPAL_SUCCESS) { dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n", diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 
0156d7d17f7d..23fc5e391c7f 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -154,6 +154,7 @@ struct pnv_phb { unsigned long m64_size; unsigned long m64_segsize; unsigned long m64_base; +#define MAX_M64_BARS 64 unsigned long m64_bar_alloc; /* IO ports */ @@ -243,8 +244,11 @@ struct pnv_iov_data { /* Did we map the VF BARs with single-PE IODA BARs? */ bool m64_single_mode; - int (*m64_map)[PCI_SRIOV_NUM_BARS]; -#define IODA_INVALID_M64 (-1) + /* + * Bit mask used to track which m64 windows are used to map the + * SR-IOV BARs for this device. + */ + DECLARE_BITMAP(used_m64_bar_mask, MAX_M64_BARS); /* * If we map the SR-IOV BARs with a segmented window then -- cgit v1.2.3 From a610d35cc8780e781321ea8d002d5fef8484bf59 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 22 Jul 2020 16:57:08 +1000 Subject: powerpc/powernv/sriov: Factor out M64 BAR setup The sequence required to use the single PE BAR mode is kinda janky and requires a little explanation. The API was designed with P7-IOC style windows where the setup process is something like: 1. Configure the window start / end address 2. Enable the window 3. Map the segments of each window to the PE For Single PE BARs the process is: 1. Set the PE for segment zero on a disabled window 2. Set the range 3. Enable the window Move the OPAL calls into their own helper functions where the quirks can be contained. Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200722065715.1432738-9-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-sriov.c | 129 ++++++++++++++++++++++------- 1 file changed, 100 insertions(+), 29 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c index 261df2307175..5d2f5d5ff183 100644 --- a/arch/powerpc/platforms/powernv/pci-sriov.c +++ b/arch/powerpc/platforms/powernv/pci-sriov.c @@ -319,6 +319,99 @@ static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs) return 0; } + +/* + * PHB3 and beyond support segmented windows. The window's address range + * is subdivided into phb->ioda.total_pe_num segments and there's a 1-1 + * mapping between PEs and segments. + */ +static int64_t pnv_ioda_map_m64_segmented(struct pnv_phb *phb, + int window_id, + resource_size_t start, + resource_size_t size) +{ + int64_t rc; + + rc = opal_pci_set_phb_mem_window(phb->opal_id, + OPAL_M64_WINDOW_TYPE, + window_id, + start, + 0, /* unused */ + size); + if (rc) + goto out; + + rc = opal_pci_phb_mmio_enable(phb->opal_id, + OPAL_M64_WINDOW_TYPE, + window_id, + OPAL_ENABLE_M64_SPLIT); +out: + if (rc) + pr_err("Failed to map M64 window #%d: %lld\n", window_id, rc); + + return rc; +} + +static int64_t pnv_ioda_map_m64_single(struct pnv_phb *phb, + int pe_num, + int window_id, + resource_size_t start, + resource_size_t size) +{ + int64_t rc; + + /* + * The API for setting up m64 mmio windows seems to have been designed + * with P7-IOC in mind. For that chip each M64 BAR (window) had a fixed + * split of 8 equally sized segments each of which could individually + * assigned to a PE. + * + * The problem with this is that the API doesn't have any way to + * communicate the number of segments we want on a BAR. This wasn't + * a problem for p7-ioc since you didn't have a choice, but the + * single PE windows added in PHB3 don't map cleanly to this API. 
+ * + * As a result we've got this slightly awkward process where we + * call opal_pci_map_pe_mmio_window() to put the single in single + * PE mode, and set the PE for the window before setting the address + * bounds. We need to do it this way because the single PE windows + * for PHB3 have different alignment requirements on PHB3. + */ + rc = opal_pci_map_pe_mmio_window(phb->opal_id, + pe_num, + OPAL_M64_WINDOW_TYPE, + window_id, + 0); + if (rc) + goto out; + + /* + * NB: In single PE mode the window needs to be aligned to 32MB + */ + rc = opal_pci_set_phb_mem_window(phb->opal_id, + OPAL_M64_WINDOW_TYPE, + window_id, + start, + 0, /* ignored by FW, m64 is 1-1 */ + size); + if (rc) + goto out; + + /* + * Now actually enable it. We specified the BAR should be in "non-split" + * mode so FW will validate that the BAR is in single PE mode. + */ + rc = opal_pci_phb_mmio_enable(phb->opal_id, + OPAL_M64_WINDOW_TYPE, + window_id, + OPAL_ENABLE_M64_NON_SPLIT); +out: + if (rc) + pr_err("Error mapping single PE BAR\n"); + + return rc; +} + static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) { struct pnv_iov_data *iov; @@ -329,7 +422,6 @@ static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) int64_t rc; int total_vfs; resource_size_t size, start; - int pe_num; int m64_bars; phb = pci_bus_to_pnvhb(pdev->bus); @@ -358,49 +450,28 @@ static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) } while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc)); set_bit(win, iov->used_m64_bar_mask); + if (iov->m64_single_mode) { size = pci_iov_resource_size(pdev, PCI_IOV_RESOURCES + i); start = res->start + size * j; + rc = pnv_ioda_map_m64_single(phb, win, + iov->pe_num_map[j], + start, + size); } else { size = resource_size(res); start = res->start; - } - /* Map the M64 here */ - if (iov->m64_single_mode) { - pe_num = iov->pe_num_map[j]; - rc = opal_pci_map_pe_mmio_window(phb->opal_id, - pe_num, OPAL_M64_WINDOW_TYPE, - win, 0); + rc = pnv_ioda_map_m64_segmented(phb, win, start, + size); } - rc = opal_pci_set_phb_mem_window(phb->opal_id, - OPAL_M64_WINDOW_TYPE, - win, - start, - 0, /* unused */ - size); - - if (rc != OPAL_SUCCESS) { dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n", win, rc); goto m64_failed; } - - if (iov->m64_single_mode) - rc = opal_pci_phb_mmio_enable(phb->opal_id, - OPAL_M64_WINDOW_TYPE, win, 2); - else - rc = opal_pci_phb_mmio_enable(phb->opal_id, - OPAL_M64_WINDOW_TYPE, win, 1); - - if (rc != OPAL_SUCCESS) { - dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n", - win, rc); - goto m64_failed; - } } } return 0; -- cgit v1.2.3 From a4bc676ed5c3f53781cc342b73097eb7e8d43fa5 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 22 Jul 2020 16:57:09 +1000 Subject: powerpc/powernv/pci: Refactor pnv_ioda_alloc_pe() Rework the PE allocation logic to allow allocating blocks of PEs rather than individually. We'll use this to allocate contiguous blocks of PEs for the SR-IOVs. This patch also adds code to pnv_ioda_alloc_pe() and pnv_ioda_reserve_pe() to use the existing, but unused, phb->pe_alloc_mutex. Currently these functions use atomic bit ops to release a currently allocated PE number. However, pnv_ioda_alloc_pe() wants to have exclusive access to the bit map while scanning for a hole large enough to accommodate the allocation size. 
Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200722065715.1432738-10-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-ioda.c | 41 +++++++++++++++++++++++++------ arch/powerpc/platforms/powernv/pci.h | 2 +- 2 files changed, 34 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 2d36a9ebf0e9..c9c25fb0783c 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -145,23 +145,45 @@ static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no) return; } + mutex_lock(&phb->ioda.pe_alloc_mutex); if (test_and_set_bit(pe_no, phb->ioda.pe_alloc)) pr_debug("%s: PE %x was reserved on PHB#%x\n", __func__, pe_no, phb->hose->global_number); + mutex_unlock(&phb->ioda.pe_alloc_mutex); pnv_ioda_init_pe(phb, pe_no); } -struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb) +struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb, int count) { - long pe; + struct pnv_ioda_pe *ret = NULL; + int run = 0, pe, i; + mutex_lock(&phb->ioda.pe_alloc_mutex); + + /* scan backwards for a run of @count cleared bits */ for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) { - if (!test_and_set_bit(pe, phb->ioda.pe_alloc)) - return pnv_ioda_init_pe(phb, pe); + if (test_bit(pe, phb->ioda.pe_alloc)) { + run = 0; + continue; + } + + run++; + if (run == count) + break; } + if (run != count) + goto out; - return NULL; + for (i = pe; i < pe + count; i++) { + set_bit(i, phb->ioda.pe_alloc); + pnv_ioda_init_pe(phb, i); + } + ret = &phb->ioda.pe_array[pe]; + +out: + mutex_unlock(&phb->ioda.pe_alloc_mutex); + return ret; } void pnv_ioda_free_pe(struct pnv_ioda_pe *pe) @@ -173,7 +195,10 @@ void pnv_ioda_free_pe(struct pnv_ioda_pe *pe) WARN_ON(pe->npucomp); /* NPUs for nvlink are not supposed to be freed */ kfree(pe->npucomp); memset(pe, 0, sizeof(struct pnv_ioda_pe)); + + mutex_lock(&phb->ioda.pe_alloc_mutex); clear_bit(pe_num, phb->ioda.pe_alloc); + mutex_unlock(&phb->ioda.pe_alloc_mutex); } /* The default M64 BAR is shared by all PEs */ @@ -976,7 +1001,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) if (pdn->pe_number != IODA_INVALID_PE) return NULL; - pe = pnv_ioda_alloc_pe(phb); + pe = pnv_ioda_alloc_pe(phb, 1); if (!pe) { pr_warn("%s: Not enough PE# available, disabling device\n", pci_name(dev)); @@ -1047,7 +1072,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all) /* The PE number isn't pinned by M64 */ if (!pe) - pe = pnv_ioda_alloc_pe(phb); + pe = pnv_ioda_alloc_pe(phb, 1); if (!pe) { pr_warn("%s: Not enough PE# available for PCI bus %04x:%02x\n", @@ -3065,7 +3090,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx); } else { /* otherwise just allocate one */ - root_pe = pnv_ioda_alloc_pe(phb); + root_pe = pnv_ioda_alloc_pe(phb, 1); phb->ioda.root_pe_idx = root_pe->pe_number; } diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 23fc5e391c7f..06431a452130 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -224,7 +224,7 @@ int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe); void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe); void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe); -struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb); +struct pnv_ioda_pe 
*pnv_ioda_alloc_pe(struct pnv_phb *phb, int count); void pnv_ioda_free_pe(struct pnv_ioda_pe *pe); #ifdef CONFIG_PCI_IOV -- cgit v1.2.3 From d29a2488d2c020032fdb1fe052347a6021e3591d Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 22 Jul 2020 16:57:10 +1000 Subject: powerpc/powernv/sriov: Drop iov->pe_num_map[] Currently the iov->pe_num_map[] does one of two things depending on whether single PE mode is being used or not. When it is, this contains an array which maps a vf_index to the corresponding PE number. When single PE mode is not being used this contains a scalar which is the base PE for the set of enabled VFs (so the PE for VFn is base + n). The array was necessary because when calling pnv_ioda_alloc_pe() there is no guarantee that the allocated PEs would be contiguous. We can now allocate contiguous blocks of PEs so this is no longer an issue. This allows us to drop the if (single_mode) {} .. else {} blocks scattered through the SR-IOV code which is a nice cleanup. This also fixes a bug in pnv_pci_sriov_disable() which used the non-atomic bitmap_clear() to manipulate the PE allocation map. Other users of the map assume it will be accessed with atomic ops. Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200722065715.1432738-11-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-sriov.c | 109 ++++++----------------------- arch/powerpc/platforms/powernv/pci.h | 7 +- 2 files changed, 28 insertions(+), 88 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c index 5d2f5d5ff183..4c02a1d90afe 100644 --- a/arch/powerpc/platforms/powernv/pci-sriov.c +++ b/arch/powerpc/platforms/powernv/pci-sriov.c @@ -452,11 +452,13 @@ static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) if (iov->m64_single_mode) { + int pe_num = iov->vf_pe_arr[j].pe_number; + size = pci_iov_resource_size(pdev, PCI_IOV_RESOURCES + i); start = res->start + size * j; rc = pnv_ioda_map_m64_single(phb, win, - iov->pe_num_map[j], + pe_num, start, size); } else { @@ -595,38 +597,24 @@ static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset) static void pnv_pci_sriov_disable(struct pci_dev *pdev) { + u16 num_vfs, base_pe; struct pnv_phb *phb; - struct pnv_ioda_pe *pe; struct pnv_iov_data *iov; - u16 num_vfs, i; phb = pci_bus_to_pnvhb(pdev->bus); iov = pnv_iov_get(pdev); num_vfs = iov->num_vfs; + base_pe = iov->vf_pe_arr[0].pe_number; /* Release VF PEs */ pnv_ioda_release_vf_PE(pdev); if (phb->type == PNV_PHB_IODA2) { if (!iov->m64_single_mode) - pnv_pci_vf_resource_shift(pdev, -*iov->pe_num_map); + pnv_pci_vf_resource_shift(pdev, -base_pe); /* Release M64 windows */ pnv_pci_vf_release_m64(pdev, num_vfs); - - /* Release PE numbers */ - if (iov->m64_single_mode) { - for (i = 0; i < num_vfs; i++) { - if (iov->pe_num_map[i] == IODA_INVALID_PE) - continue; - - pe = &phb->ioda.pe_array[iov->pe_num_map[i]]; - pnv_ioda_free_pe(pe); - } - } else - bitmap_clear(phb->ioda.pe_alloc, *iov->pe_num_map, num_vfs); - /* Releasing pe_num_map */ - kfree(iov->pe_num_map); } } @@ -652,13 +640,7 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) int vf_bus = pci_iov_virtfn_bus(pdev, vf_index); struct pci_dn *vf_pdn; - if (iov->m64_single_mode) - pe_num = iov->pe_num_map[vf_index]; - else - pe_num = *iov->pe_num_map + vf_index; - - pe = &phb->ioda.pe_array[pe_num]; - pe->pe_number = pe_num; + pe = &iov->vf_pe_arr[vf_index]; pe->phb = phb; pe->flags = 
PNV_IODA_PE_VF; pe->pbus = NULL; @@ -666,6 +648,7 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) pe->mve_number = -1; pe->rid = (vf_bus << 8) | vf_devfn; + pe_num = pe->pe_number; pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n", pci_domain_nr(pdev->bus), pdev->bus->number, PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num); @@ -697,9 +680,9 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) { + struct pnv_ioda_pe *base_pe; struct pnv_iov_data *iov; struct pnv_phb *phb; - struct pnv_ioda_pe *pe; int ret; u16 i; @@ -713,55 +696,14 @@ static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) return -ENOSPC; } - /* - * When M64 BARs functions in Single PE mode, the number of VFs - * could be enabled must be less than the number of M64 BARs. - */ - if (iov->m64_single_mode && num_vfs > phb->ioda.m64_bar_idx) { - dev_info(&pdev->dev, "Not enough M64 BAR for VFs\n"); + /* allocate a contigious block of PEs for our VFs */ + base_pe = pnv_ioda_alloc_pe(phb, num_vfs); + if (!base_pe) { + pci_err(pdev, "Unable to allocate PEs for %d VFs\n", num_vfs); return -EBUSY; } - /* Allocating pe_num_map */ - if (iov->m64_single_mode) - iov->pe_num_map = kmalloc_array(num_vfs, - sizeof(*iov->pe_num_map), - GFP_KERNEL); - else - iov->pe_num_map = kmalloc(sizeof(*iov->pe_num_map), GFP_KERNEL); - - if (!iov->pe_num_map) - return -ENOMEM; - - if (iov->m64_single_mode) - for (i = 0; i < num_vfs; i++) - iov->pe_num_map[i] = IODA_INVALID_PE; - - /* Calculate available PE for required VFs */ - if (iov->m64_single_mode) { - for (i = 0; i < num_vfs; i++) { - pe = pnv_ioda_alloc_pe(phb); - if (!pe) { - ret = -EBUSY; - goto m64_failed; - } - - iov->pe_num_map[i] = pe->pe_number; - } - } else { - mutex_lock(&phb->ioda.pe_alloc_mutex); - *iov->pe_num_map = bitmap_find_next_zero_area( - phb->ioda.pe_alloc, phb->ioda.total_pe_num, - 0, num_vfs, 0); - if (*iov->pe_num_map >= phb->ioda.total_pe_num) { - mutex_unlock(&phb->ioda.pe_alloc_mutex); - dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs); - kfree(iov->pe_num_map); - return -EBUSY; - } - bitmap_set(phb->ioda.pe_alloc, *iov->pe_num_map, num_vfs); - mutex_unlock(&phb->ioda.pe_alloc_mutex); - } + iov->vf_pe_arr = base_pe; iov->num_vfs = num_vfs; /* Assign M64 window accordingly */ @@ -777,9 +719,10 @@ static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) * Otherwise, the PE# for the VF will conflict with others. 
*/ if (!iov->m64_single_mode) { - ret = pnv_pci_vf_resource_shift(pdev, *iov->pe_num_map); + ret = pnv_pci_vf_resource_shift(pdev, + base_pe->pe_number); if (ret) - goto m64_failed; + goto shift_failed; } } @@ -788,20 +731,12 @@ static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) return 0; -m64_failed: - if (iov->m64_single_mode) { - for (i = 0; i < num_vfs; i++) { - if (iov->pe_num_map[i] == IODA_INVALID_PE) - continue; - - pe = &phb->ioda.pe_array[iov->pe_num_map[i]]; - pnv_ioda_free_pe(pe); - } - } else - bitmap_clear(phb->ioda.pe_alloc, *iov->pe_num_map, num_vfs); +shift_failed: + pnv_pci_vf_release_m64(pdev, num_vfs); - /* Releasing pe_num_map */ - kfree(iov->pe_num_map); +m64_failed: + for (i = 0; i < num_vfs; i++) + pnv_ioda_free_pe(&iov->vf_pe_arr[i]); return ret; } diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 06431a452130..f76923f44f66 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -239,7 +239,12 @@ struct pnv_iov_data { /* number of VFs enabled */ u16 num_vfs; - unsigned int *pe_num_map; /* PE# for the first VF PE or array */ + + /* + * Pointer to the IODA PE state of each VF. Note that this is a pointer + * into the PHB's PE array (phb->ioda.pe_array). + */ + struct pnv_ioda_pe *vf_pe_arr; /* Did we map the VF BARs with single-PE IODA BARs? */ bool m64_single_mode; -- cgit v1.2.3 From 052da31d45fc71238ea8bed7e9a84648a1ee0bf3 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 22 Jul 2020 16:57:11 +1000 Subject: powerpc/powernv/sriov: De-indent setup and teardown Remove the IODA2 PHB checks. We already assume IODA2 in several places so there's not much point in wrapping most of the setup and teardown process in an if block. Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200722065715.1432738-12-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-sriov.c | 85 +++++++++++++++++------------- arch/powerpc/platforms/powernv/pci.h | 5 +- 2 files changed, 49 insertions(+), 41 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c index 4c02a1d90afe..4bd617b19b6d 100644 --- a/arch/powerpc/platforms/powernv/pci-sriov.c +++ b/arch/powerpc/platforms/powernv/pci-sriov.c @@ -606,16 +606,18 @@ static void pnv_pci_sriov_disable(struct pci_dev *pdev) num_vfs = iov->num_vfs; base_pe = iov->vf_pe_arr[0].pe_number; + if (WARN_ON(!iov)) + return; + /* Release VF PEs */ pnv_ioda_release_vf_PE(pdev); - if (phb->type == PNV_PHB_IODA2) { - if (!iov->m64_single_mode) - pnv_pci_vf_resource_shift(pdev, -base_pe); + /* Un-shift the IOV BAR resources */ + if (!iov->m64_single_mode) + pnv_pci_vf_resource_shift(pdev, -base_pe); - /* Release M64 windows */ - pnv_pci_vf_release_m64(pdev, num_vfs); - } + /* Release M64 windows */ + pnv_pci_vf_release_m64(pdev, num_vfs); } static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) @@ -689,41 +691,50 @@ static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) phb = pci_bus_to_pnvhb(pdev->bus); iov = pnv_iov_get(pdev); - if (phb->type == PNV_PHB_IODA2) { - if (!iov->vfs_expanded) { - dev_info(&pdev->dev, - "don't support this SRIOV device with non 64bit-prefetchable IOV BAR\n"); - return -ENOSPC; - } + /* + * There's a calls to IODA2 PE setup code littered throughout. We could + * probably fix that, but we'd still have problems due to the + * restriction inherent on IODA1 PHBs. 
+ * + * NB: We class IODA3 as IODA2 since they're very similar. + */ + if (phb->type != PNV_PHB_IODA2) { + pci_err(pdev, "SR-IOV is not supported on this PHB\n"); + return -ENXIO; + } - /* allocate a contigious block of PEs for our VFs */ - base_pe = pnv_ioda_alloc_pe(phb, num_vfs); - if (!base_pe) { - pci_err(pdev, "Unable to allocate PEs for %d VFs\n", num_vfs); - return -EBUSY; - } + if (!iov->vfs_expanded) { + dev_info(&pdev->dev, "don't support this SRIOV device with non 64bit-prefetchable IOV BAR\n"); + return -ENOSPC; + } - iov->vf_pe_arr = base_pe; - iov->num_vfs = num_vfs; + /* allocate a contigious block of PEs for our VFs */ + base_pe = pnv_ioda_alloc_pe(phb, num_vfs); + if (!base_pe) { + pci_err(pdev, "Unable to allocate PEs for %d VFs\n", num_vfs); + return -EBUSY; + } - /* Assign M64 window accordingly */ - ret = pnv_pci_vf_assign_m64(pdev, num_vfs); - if (ret) { - dev_info(&pdev->dev, "Not enough M64 window resources\n"); - goto m64_failed; - } + iov->vf_pe_arr = base_pe; + iov->num_vfs = num_vfs; - /* - * When using one M64 BAR to map one IOV BAR, we need to shift - * the IOV BAR according to the PE# allocated to the VFs. - * Otherwise, the PE# for the VF will conflict with others. - */ - if (!iov->m64_single_mode) { - ret = pnv_pci_vf_resource_shift(pdev, - base_pe->pe_number); - if (ret) - goto shift_failed; - } + /* Assign M64 window accordingly */ + ret = pnv_pci_vf_assign_m64(pdev, num_vfs); + if (ret) { + dev_info(&pdev->dev, "Not enough M64 window resources\n"); + goto m64_failed; + } + + /* + * When using one M64 BAR to map one IOV BAR, we need to shift + * the IOV BAR according to the PE# allocated to the VFs. + * Otherwise, the PE# for the VF will conflict with others. + */ + if (!iov->m64_single_mode) { + ret = pnv_pci_vf_resource_shift(pdev, + base_pe->pe_number); + if (ret) + goto shift_failed; } /* Setup VF PEs */ diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index f76923f44f66..41a6f4e938e4 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -240,10 +240,7 @@ struct pnv_iov_data { /* number of VFs enabled */ u16 num_vfs; - /* - * Pointer to the IODA PE state of each VF. Note that this is a pointer - * into the PHB's PE array (phb->ioda.pe_array). - */ + /* pointer to the array of VF PEs. num_vfs long*/ struct pnv_ioda_pe *vf_pe_arr; /* Did we map the VF BARs with single-PE IODA BARs? */ -- cgit v1.2.3 From 39efc03e3ee8f41909b7542be70b4061b38ca277 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 22 Jul 2020 16:57:12 +1000 Subject: powerpc/powernv/sriov: Move M64 BAR allocation into a helper I want to refactor the loop this code is currently inside of. Hoist it on out. 
Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200722065715.1432738-13-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-sriov.c | 31 +++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c index 4bd617b19b6d..8eee59c210c8 100644 --- a/arch/powerpc/platforms/powernv/pci-sriov.c +++ b/arch/powerpc/platforms/powernv/pci-sriov.c @@ -412,6 +412,23 @@ out: return rc; } +static int pnv_pci_alloc_m64_bar(struct pnv_phb *phb, struct pnv_iov_data *iov) +{ + int win; + + do { + win = find_next_zero_bit(&phb->ioda.m64_bar_alloc, + phb->ioda.m64_bar_idx + 1, 0); + + if (win >= phb->ioda.m64_bar_idx + 1) + return -1; + } while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc)); + + set_bit(win, iov->used_m64_bar_mask); + + return win; +} + static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) { struct pnv_iov_data *iov; @@ -439,17 +456,9 @@ static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) continue; for (j = 0; j < m64_bars; j++) { - - /* allocate a window ID for this BAR */ - do { - win = find_next_zero_bit(&phb->ioda.m64_bar_alloc, - phb->ioda.m64_bar_idx + 1, 0); - - if (win >= phb->ioda.m64_bar_idx + 1) - goto m64_failed; - } while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc)); - set_bit(win, iov->used_m64_bar_mask); - + win = pnv_pci_alloc_m64_bar(phb, iov); + if (win < 0) + goto m64_failed; if (iov->m64_single_mode) { int pe_num = iov->vf_pe_arr[j].pe_number; -- cgit v1.2.3 From a0be516f8160fdb4836237cba037229e88a1de7d Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 22 Jul 2020 16:57:13 +1000 Subject: powerpc/powernv/sriov: Refactor M64 BAR setup Split up the logic so that we have one branch that handles setting up a segmented window and another that handles setting up single PE windows for each VF. Signed-off-by: Oliver O'Halloran Reviewed-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200722065715.1432738-14-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-sriov.c | 57 ++++++++++++++---------------- 1 file changed, 27 insertions(+), 30 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c index 8eee59c210c8..31adc36c6bf1 100644 --- a/arch/powerpc/platforms/powernv/pci-sriov.c +++ b/arch/powerpc/platforms/powernv/pci-sriov.c @@ -437,52 +437,49 @@ static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) struct resource *res; int i, j; int64_t rc; - int total_vfs; resource_size_t size, start; - int m64_bars; + int base_pe_num; phb = pci_bus_to_pnvhb(pdev->bus); iov = pnv_iov_get(pdev); - total_vfs = pci_sriov_get_totalvfs(pdev); - - if (iov->m64_single_mode) - m64_bars = num_vfs; - else - m64_bars = 1; for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { res = &pdev->resource[i + PCI_IOV_RESOURCES]; if (!res->flags || !res->parent) continue; - for (j = 0; j < m64_bars; j++) { + /* don't need single mode? map everything in one go! 
*/ + if (!iov->m64_single_mode) { win = pnv_pci_alloc_m64_bar(phb, iov); if (win < 0) goto m64_failed; - if (iov->m64_single_mode) { - int pe_num = iov->vf_pe_arr[j].pe_number; - - size = pci_iov_resource_size(pdev, - PCI_IOV_RESOURCES + i); - start = res->start + size * j; - rc = pnv_ioda_map_m64_single(phb, win, - pe_num, - start, - size); - } else { - size = resource_size(res); - start = res->start; - - rc = pnv_ioda_map_m64_segmented(phb, win, start, - size); - } + size = resource_size(res); + start = res->start; - if (rc != OPAL_SUCCESS) { - dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n", - win, rc); + rc = pnv_ioda_map_m64_segmented(phb, win, start, size); + if (rc) + goto m64_failed; + + continue; + } + + /* otherwise map each VF with single PE BARs */ + size = pci_iov_resource_size(pdev, PCI_IOV_RESOURCES + i); + base_pe_num = iov->vf_pe_arr[0].pe_number; + + for (j = 0; j < num_vfs; j++) { + win = pnv_pci_alloc_m64_bar(phb, iov); + if (win < 0) + goto m64_failed; + + start = res->start + size * j; + rc = pnv_ioda_map_m64_single(phb, win, + base_pe_num + j, + start, + size); + if (rc) goto m64_failed; - } } } return 0; -- cgit v1.2.3 From 4c51f3e1e8702cbd0e53159fc3d1f54c20c70574 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 22 Jul 2020 16:57:14 +1000 Subject: powerpc/powernv/sriov: Make single PE mode a per-BAR setting Using single PE BARs to map an SR-IOV BAR is really a choice about what strategy to use when mapping a BAR. It doesn't make much sense for this to be a global setting since a device might have one large BAR which needs to be mapped with single PE windows and another smaller BAR that can be mapped with a regular segmented window. Make the segmented vs single decision a per-BAR setting and clean up the logic that decides which mode to use. Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200722065715.1432738-15-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-sriov.c | 131 ++++++++++++++--------------- arch/powerpc/platforms/powernv/pci.h | 11 ++- 2 files changed, 73 insertions(+), 69 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c index 31adc36c6bf1..d73684869c49 100644 --- a/arch/powerpc/platforms/powernv/pci-sriov.c +++ b/arch/powerpc/platforms/powernv/pci-sriov.c @@ -146,21 +146,17 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev) { struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); - const resource_size_t gate = phb->ioda.m64_segsize >> 2; struct resource *res; int i; - resource_size_t size, total_vf_bar_sz; + resource_size_t vf_bar_sz; struct pnv_iov_data *iov; - int mul, total_vfs; + int mul; iov = kzalloc(sizeof(*iov), GFP_KERNEL); if (!iov) goto disable_iov; pdev->dev.archdata.iov_data = iov; - - total_vfs = pci_sriov_get_totalvfs(pdev); mul = phb->ioda.total_pe_num; - total_vf_bar_sz = 0; for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { res = &pdev->resource[i + PCI_IOV_RESOURCES]; @@ -172,50 +168,50 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev) goto disable_iov; } - total_vf_bar_sz += pci_iov_resource_size(pdev, - i + PCI_IOV_RESOURCES); + vf_bar_sz = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES); /* - * If bigger than quarter of M64 segment size, just round up - * power of two. + * Generally, one segmented M64 BAR maps one IOV BAR. However, + * if a VF BAR is too large we end up wasting a lot of space. 
+ * If each VF needs more than 1/4 of the default m64 segment + * then each VF BAR should be mapped in single-PE mode to reduce + * the amount of space required. This does however limit the + * number of VFs we can support. * - * Generally, one M64 BAR maps one IOV BAR. To avoid conflict - * with other devices, IOV BAR size is expanded to be - * (total_pe * VF_BAR_size). When VF_BAR_size is half of M64 - * segment size , the expanded size would equal to half of the - * whole M64 space size, which will exhaust the M64 Space and - * limit the system flexibility. This is a design decision to - * set the boundary to quarter of the M64 segment size. + * The 1/4 limit is arbitrary and can be tweaked. */ - if (total_vf_bar_sz > gate) { - mul = roundup_pow_of_two(total_vfs); - dev_info(&pdev->dev, - "VF BAR Total IOV size %llx > %llx, roundup to %d VFs\n", - total_vf_bar_sz, gate, mul); - iov->m64_single_mode = true; - break; - } - } + if (vf_bar_sz > (phb->ioda.m64_segsize >> 2)) { + /* + * On PHB3, the minimum size alignment of M64 BAR in + * single mode is 32MB. If this VF BAR is smaller than + * 32MB, but still too large for a segmented window + * then we can't map it and need to disable SR-IOV for + * this device. + */ + if (vf_bar_sz < SZ_32M) { + pci_err(pdev, "VF BAR%d: %pR can't be mapped in single PE mode\n", + i, res); + goto disable_iov; + } - for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { - res = &pdev->resource[i + PCI_IOV_RESOURCES]; - if (!res->flags || res->parent) + iov->m64_single_mode[i] = true; continue; + } - size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES); /* - * On PHB3, the minimum size alignment of M64 BAR in single - * mode is 32MB. + * This BAR can be mapped with one segmented window, so adjust + * te resource size to accommodate. */ - if (iov->m64_single_mode && (size < SZ_32M)) - goto disable_iov; + pci_dbg(pdev, " Fixing VF BAR%d: %pR to\n", i, res); + res->end = res->start + vf_bar_sz * mul - 1; + pci_dbg(pdev, " %pR\n", res); - dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res); - res->end = res->start + size * mul - 1; - dev_dbg(&pdev->dev, " %pR\n", res); - dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)", + pci_info(pdev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)", i, res, mul); + + iov->need_shift = true; } + iov->vfs_expanded = mul; return; @@ -259,42 +255,40 @@ void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev) resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev, int resno) { - struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); struct pnv_iov_data *iov = pnv_iov_get(pdev); resource_size_t align; + /* + * iov can be null if we have an SR-IOV device with IOV BAR that can't + * be placed in the m64 space (i.e. The BAR is 32bit or non-prefetch). + * In that case we don't allow VFs to be enabled since one of their + * BARs would not be placed in the correct PE. + */ + if (!iov) + return align; + if (!iov->vfs_expanded) + return align; + + align = pci_iov_resource_size(pdev, resno); + + /* + * If we're using single mode then we can just use the native VF BAR + * alignment. We validated that it's possible to use a single PE + * window above when we did the fixup. + */ + if (iov->m64_single_mode[resno - PCI_IOV_RESOURCES]) + return align; + /* * On PowerNV platform, IOV BAR is mapped by M64 BAR to enable the * SR-IOV. While from hardware perspective, the range mapped by M64 * BAR should be size aligned. 
* - * When IOV BAR is mapped with M64 BAR in Single PE mode, the extra - * powernv-specific hardware restriction is gone. But if just use the - * VF BAR size as the alignment, PF BAR / VF BAR may be allocated with - * in one segment of M64 #15, which introduces the PE conflict between - * PF and VF. Based on this, the minimum alignment of an IOV BAR is - * m64_segsize. - * * This function returns the total IOV BAR size if M64 BAR is in * Shared PE mode or just VF BAR size if not. * If the M64 BAR is in Single PE mode, return the VF BAR size or * M64 segment size if IOV BAR size is less. */ - align = pci_iov_resource_size(pdev, resno); - - /* - * iov can be null if we have an SR-IOV device with IOV BAR that can't - * be placed in the m64 space (i.e. The BAR is 32bit or non-prefetch). - * In that case we don't allow VFs to be enabled so just return the - * default alignment. - */ - if (!iov) - return align; - if (!iov->vfs_expanded) - return align; - if (iov->m64_single_mode) - return max(align, (resource_size_t)phb->ioda.m64_segsize); - return iov->vfs_expanded * align; } @@ -449,7 +443,7 @@ static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) continue; /* don't need single mode? map everything in one go! */ - if (!iov->m64_single_mode) { + if (!iov->m64_single_mode[i]) { win = pnv_pci_alloc_m64_bar(phb, iov); if (win < 0) goto m64_failed; @@ -542,6 +536,8 @@ static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset) res = &dev->resource[i + PCI_IOV_RESOURCES]; if (!res->flags || !res->parent) continue; + if (iov->m64_single_mode[i]) + continue; /* * The actual IOV BAR range is determined by the start address @@ -573,6 +569,8 @@ static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset) res = &dev->resource[i + PCI_IOV_RESOURCES]; if (!res->flags || !res->parent) continue; + if (iov->m64_single_mode[i]) + continue; size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES); res2 = *res; @@ -618,8 +616,8 @@ static void pnv_pci_sriov_disable(struct pci_dev *pdev) /* Release VF PEs */ pnv_ioda_release_vf_PE(pdev); - /* Un-shift the IOV BAR resources */ - if (!iov->m64_single_mode) + /* Un-shift the IOV BARs if we need to */ + if (iov->need_shift) pnv_pci_vf_resource_shift(pdev, -base_pe); /* Release M64 windows */ @@ -736,9 +734,8 @@ static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) * the IOV BAR according to the PE# allocated to the VFs. * Otherwise, the PE# for the VF will conflict with others. */ - if (!iov->m64_single_mode) { - ret = pnv_pci_vf_resource_shift(pdev, - base_pe->pe_number); + if (iov->need_shift) { + ret = pnv_pci_vf_resource_shift(pdev, base_pe->pe_number); if (ret) goto shift_failed; } diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 41a6f4e938e4..902e928c7c22 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -243,8 +243,15 @@ struct pnv_iov_data { /* pointer to the array of VF PEs. num_vfs long*/ struct pnv_ioda_pe *vf_pe_arr; - /* Did we map the VF BARs with single-PE IODA BARs? */ - bool m64_single_mode; + /* Did we map the VF BAR with single-PE IODA BARs? */ + bool m64_single_mode[PCI_SRIOV_NUM_BARS]; + + /* + * True if we're using any segmented windows. In that case we need + * shift the start of the IOV resource the segment corresponding to + * the allocated PE. 
+ */ + bool need_shift; /* * Bit mask used to track which m64 windows are used to map the -- cgit v1.2.3 From 84d8505ed1dafb2e62d49fca5e7aa7d96cfcec49 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 22 Jul 2020 16:57:15 +1000 Subject: powerpc/powernv/sriov: Remove vfs_expanded Previously iov->vfs_expanded was used for two purposes. 1) To work out how much we need to multiple the per-VF BAR size to figure out the total space required for the IOV BAR. 2) To indicate that IOV is not usable with this device (vfs_expanded == 0). We don't really need the field for either since the multiple in 1) is always the number PEs supported by the PHB. Similarly, we don't really need it in 2) either since the IOV data field will be NULL if we can't use IOV with the device. Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200722065715.1432738-16-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-sriov.c | 9 +++------ arch/powerpc/platforms/powernv/pci.h | 3 --- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c index d73684869c49..8404d8c3901d 100644 --- a/arch/powerpc/platforms/powernv/pci-sriov.c +++ b/arch/powerpc/platforms/powernv/pci-sriov.c @@ -212,8 +212,6 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev) iov->need_shift = true; } - iov->vfs_expanded = mul; - return; disable_iov: @@ -255,6 +253,7 @@ void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev) resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev, int resno) { + struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); struct pnv_iov_data *iov = pnv_iov_get(pdev); resource_size_t align; @@ -266,8 +265,6 @@ resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev, */ if (!iov) return align; - if (!iov->vfs_expanded) - return align; align = pci_iov_resource_size(pdev, resno); @@ -289,7 +286,7 @@ resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev, * If the M64 BAR is in Single PE mode, return the VF BAR size or * M64 segment size if IOV BAR size is less. */ - return iov->vfs_expanded * align; + return phb->ioda.total_pe_num * align; } static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs) @@ -707,7 +704,7 @@ static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) return -ENXIO; } - if (!iov->vfs_expanded) { + if (!iov) { dev_info(&pdev->dev, "don't support this SRIOV device with non 64bit-prefetchable IOV BAR\n"); return -ENOSPC; } diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 902e928c7c22..c8cc152bdf52 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -234,9 +234,6 @@ void pnv_ioda_free_pe(struct pnv_ioda_pe *pe); * and this structure is used to keep track of it all. */ struct pnv_iov_data { - /* number of VFs IOV BAR expanded. FIXME: rename this to something less bad */ - u16 vfs_expanded; - /* number of VFs enabled */ u16 num_vfs; -- cgit v1.2.3 From 65156f2b1d9d5bf3fd0eac54b0a7fd515c92773c Mon Sep 17 00:00:00 2001 From: Athira Rajeev Date: Thu, 23 Jul 2020 03:32:37 -0400 Subject: powerpc/perf: Initialize power10 PMU registers in cpu setup routine Initialize Monitor Mode Control Register 3 (MMCR3) SPR which is new in power10. For PowerISA v3.1, BHRB disable is controlled via Monitor Mode Control Register A (MMCRA) bit, namely "BHRB Recording Disable (BHRBRD)". 
This patch also initializes MMCRA BHRBRD to disable BHRB feature at boot for power10. Reported-by: kernel test robot Signed-off-by: Athira Rajeev Reviewed-by: Jordan Niethe Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1595489557-2047-1-git-send-email-atrajeev@linux.vnet.ibm.com --- arch/powerpc/kernel/cpu_setup_power.S | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index 86527d19348c..704e8b9501ee 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -94,13 +94,15 @@ _GLOBAL(__restore_cpu_power8) _GLOBAL(__setup_cpu_power10) mflr r11 bl __init_FSCR_power10 + bl __init_PMU + bl __init_PMU_ISA31 b 1f _GLOBAL(__setup_cpu_power9) mflr r11 bl __init_FSCR_power9 -1: bl __init_PMU - bl __init_hvmode_206 + bl __init_PMU +1: bl __init_hvmode_206 mtlr r11 beqlr li r0,0 @@ -124,13 +126,15 @@ _GLOBAL(__setup_cpu_power9) _GLOBAL(__restore_cpu_power10) mflr r11 bl __init_FSCR_power10 + bl __init_PMU + bl __init_PMU_ISA31 b 1f _GLOBAL(__restore_cpu_power9) mflr r11 bl __init_FSCR_power9 -1: bl __init_PMU - mfmsr r3 + bl __init_PMU +1: mfmsr r3 rldicl. r0,r3,4,63 mtlr r11 beqlr @@ -239,3 +243,10 @@ __init_PMU_ISA207: li r5,0 mtspr SPRN_MMCRS,r5 blr + +__init_PMU_ISA31: + li r5,0 + mtspr SPRN_MMCR3,r5 + LOAD_REG_IMMEDIATE(r5, MMCRA_BHRB_DISABLE) + mtspr SPRN_MMCRA,r5 + blr -- cgit v1.2.3 From 8384c82ab0860cd7db2ce4ec403e574f4ee54b6e Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 31 May 2020 09:17:03 +1000 Subject: macintosh/adb-iop: Remove dead and redundant code Signed-off-by: Finn Thain Tested-by: Stan Johnson Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/7720ffb559c334504e16b24d9c2f3b8973d2d674.1590880623.git.fthain@telegraphics.com.au --- drivers/macintosh/adb-iop.c | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c index fca31640e3ef..ce28ff40fb9c 100644 --- a/drivers/macintosh/adb-iop.c +++ b/drivers/macintosh/adb-iop.c @@ -18,24 +18,16 @@ #include #include #include -#include #include #include #include -#include #include #include -/*#define DEBUG_ADB_IOP*/ - static struct adb_request *current_req; static struct adb_request *last_req; -#if 0 -static unsigned char reply_buff[16]; -static unsigned char *reply_ptr; -#endif static enum adb_iop_state { idle, @@ -104,22 +96,11 @@ static void adb_iop_listen(struct iop_msg *msg) struct adb_iopmsg *amsg = (struct adb_iopmsg *)msg->message; struct adb_request *req; unsigned long flags; -#ifdef DEBUG_ADB_IOP - int i; -#endif local_irq_save(flags); req = current_req; -#ifdef DEBUG_ADB_IOP - printk("adb_iop_listen %p: rcvd packet, %d bytes: %02X %02X", req, - (uint)amsg->count + 2, (uint)amsg->flags, (uint)amsg->cmd); - for (i = 0; i < amsg->count; i++) - printk(" %02X", (uint)amsg->data[i]); - printk("\n"); -#endif - /* Handle a timeout. Timeout packets seem to occur even after */ /* we've gotten a valid reply to a TALK, so I'm assuming that */ /* a "timeout" is actually more like an "end-of-data" signal. 
*/ @@ -163,9 +144,6 @@ static void adb_iop_start(void) unsigned long flags; struct adb_request *req; struct adb_iopmsg amsg; -#ifdef DEBUG_ADB_IOP - int i; -#endif /* get the packet to send */ req = current_req; @@ -174,13 +152,6 @@ static void adb_iop_start(void) local_irq_save(flags); -#ifdef DEBUG_ADB_IOP - printk("adb_iop_start %p: sending packet, %d bytes:", req, req->nbytes); - for (i = 0; i < req->nbytes; i++) - printk(" %02X", (uint)req->data[i]); - printk("\n"); -#endif - /* The IOP takes MacII-style packets, so */ /* strip the initial ADB_PACKET byte. */ -- cgit v1.2.3 From ff785e179faf4bb06a2f73b8dcde6dabb66a83d2 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 31 May 2020 09:17:03 +1000 Subject: macintosh/adb-iop: Correct comment text This patch improves comment style and corrects some misunderstandings in the text. Signed-off-by: Finn Thain Tested-by: Stan Johnson Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/996f835d2f3d90baaaf9ee954e252d06e8886c6f.1590880623.git.fthain@telegraphics.com.au --- drivers/macintosh/adb-iop.c | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c index ce28ff40fb9c..ca3b411b0742 100644 --- a/drivers/macintosh/adb-iop.c +++ b/drivers/macintosh/adb-iop.c @@ -101,11 +101,10 @@ static void adb_iop_listen(struct iop_msg *msg) req = current_req; - /* Handle a timeout. Timeout packets seem to occur even after */ - /* we've gotten a valid reply to a TALK, so I'm assuming that */ - /* a "timeout" is actually more like an "end-of-data" signal. */ - /* We need to send back a timeout packet to the IOP to shut */ - /* it up, plus complete the current request, if any. */ + /* Handle a timeout. Timeout packets seem to occur even after + * we've gotten a valid reply to a TALK, presumably because of + * autopolling. + */ if (amsg->flags & ADB_IOP_TIMEOUT) { msg->reply[0] = ADB_IOP_TIMEOUT | ADB_IOP_AUTOPOLL; @@ -115,9 +114,6 @@ static void adb_iop_listen(struct iop_msg *msg) adb_iop_end_req(req, idle); } } else { - /* TODO: is it possible for more than one chunk of data */ - /* to arrive before the timeout? If so we need to */ - /* use reply_ptr here like the other drivers do. */ if ((adb_iop_state == awaiting_reply) && (amsg->flags & ADB_IOP_EXPLICIT)) { req->reply_len = amsg->count + 1; @@ -152,23 +148,24 @@ static void adb_iop_start(void) local_irq_save(flags); - /* The IOP takes MacII-style packets, so */ - /* strip the initial ADB_PACKET byte. */ - + /* The IOP takes MacII-style packets, so strip the initial + * ADB_PACKET byte. + */ amsg.flags = ADB_IOP_EXPLICIT; amsg.count = req->nbytes - 2; - /* amsg.data immediately follows amsg.cmd, effectively making */ - /* amsg.cmd a pointer to the beginning of a full ADB packet. */ + /* amsg.data immediately follows amsg.cmd, effectively making + * &amsg.cmd a pointer to the beginning of a full ADB packet. + */ memcpy(&amsg.cmd, req->data + 1, req->nbytes - 1); req->sent = 1; adb_iop_state = sending; local_irq_restore(flags); - /* Now send it. The IOP manager will call adb_iop_complete */ - /* when the packet has been sent. */ - + /* Now send it. The IOP manager will call adb_iop_complete + * when the message has been sent. 
+ */ iop_send_message(ADB_IOP, ADB_CHAN, req, sizeof(amsg), (__u8 *)&amsg, adb_iop_complete); } -- cgit v1.2.3 From 303511edb859b1fbf48b3c1d1d53b33a6ebd2a2b Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 31 May 2020 09:17:03 +1000 Subject: macintosh/adb-iop: Adopt bus reset algorithm from via-macii driver This algorithm is slightly shorter and avoids the surprising adb_iop_start() call in adb_iop_poll(). Signed-off-by: Finn Thain Tested-by: Stan Johnson Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/b63d56ecb6e75f11a0bf02231f3b2db656a528a3.1590880623.git.fthain@telegraphics.com.au --- drivers/macintosh/adb-iop.c | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c index ca3b411b0742..c3089dacf2e2 100644 --- a/drivers/macintosh/adb-iop.c +++ b/drivers/macintosh/adb-iop.c @@ -238,24 +238,19 @@ int adb_iop_autopoll(int devs) void adb_iop_poll(void) { - if (adb_iop_state == idle) - adb_iop_start(); iop_ism_irq_poll(ADB_IOP); } int adb_iop_reset_bus(void) { - struct adb_request req = { - .reply_expected = 0, - .nbytes = 2, - .data = { ADB_PACKET, 0 }, - }; - - adb_iop_write(&req); - while (!req.complete) { - adb_iop_poll(); - schedule(); - } + struct adb_request req; + + /* Command = 0, Address = ignored */ + adb_request(&req, NULL, ADBREQ_NOSEND, 1, ADB_BUSRESET); + adb_iop_send_request(&req, 1); + + /* Don't want any more requests during the Global Reset low time. */ + mdelay(3); return 0; } -- cgit v1.2.3 From aac840eca8fec02d594560647130d4e4447e10d9 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 31 May 2020 09:17:03 +1000 Subject: macintosh/adb-iop: Access current_req and adb_iop_state when inside lock Drop the redundant local_irq_save/restore() from adb_iop_start() because the caller has to do it anyway. This is the pattern used in via-macii. Signed-off-by: Finn Thain Tested-by: Stan Johnson Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/bbe32b087c7e04d68e2425f6a2df4a414d167c32.1590880623.git.fthain@telegraphics.com.au --- drivers/macintosh/adb-iop.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c index c3089dacf2e2..7ecc41bc7358 100644 --- a/drivers/macintosh/adb-iop.c +++ b/drivers/macintosh/adb-iop.c @@ -137,7 +137,6 @@ static void adb_iop_listen(struct iop_msg *msg) static void adb_iop_start(void) { - unsigned long flags; struct adb_request *req; struct adb_iopmsg amsg; @@ -146,8 +145,6 @@ static void adb_iop_start(void) if (!req) return; - local_irq_save(flags); - /* The IOP takes MacII-style packets, so strip the initial * ADB_PACKET byte. */ @@ -161,7 +158,6 @@ static void adb_iop_start(void) req->sent = 1; adb_iop_state = sending; - local_irq_restore(flags); /* Now send it. The IOP manager will call adb_iop_complete * when the message has been sent. 
@@ -208,13 +204,13 @@ static int adb_iop_write(struct adb_request *req) return -EINVAL; } - local_irq_save(flags); - req->next = NULL; req->sent = 0; req->complete = 0; req->reply_len = 0; + local_irq_save(flags); + if (current_req != 0) { last_req->next = req; last_req = req; @@ -223,10 +219,11 @@ static int adb_iop_write(struct adb_request *req) last_req = req; } - local_irq_restore(flags); - if (adb_iop_state == idle) adb_iop_start(); + + local_irq_restore(flags); + return 0; } -- cgit v1.2.3 From 56b732edda96b1942fff974fd298ea2a2c543b94 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 31 May 2020 09:17:03 +1000 Subject: macintosh/adb-iop: Resolve static checker warnings drivers/macintosh/adb-iop.c:215:28: warning: Using plain integer as NULL pointer drivers/macintosh/adb-iop.c:170:5: warning: symbol 'adb_iop_probe' was not declared. Should it be static? drivers/macintosh/adb-iop.c:177:5: warning: symbol 'adb_iop_init' was not declared. Should it be static? drivers/macintosh/adb-iop.c:184:5: warning: symbol 'adb_iop_send_request' was not declared. Should it be static? drivers/macintosh/adb-iop.c:230:5: warning: symbol 'adb_iop_autopoll' was not declared. Should it be static? drivers/macintosh/adb-iop.c:236:6: warning: symbol 'adb_iop_poll' was not declared. Should it be static? drivers/macintosh/adb-iop.c:241:5: warning: symbol 'adb_iop_reset_bus' was not declared. Should it be static? Signed-off-by: Finn Thain Tested-by: Stan Johnson Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/25edf4450abd20e002b166ba3a11189dc1efa906.1590880623.git.fthain@telegraphics.com.au --- drivers/macintosh/adb-iop.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c index 7ecc41bc7358..a2b28e09f2ce 100644 --- a/drivers/macintosh/adb-iop.c +++ b/drivers/macintosh/adb-iop.c @@ -166,21 +166,21 @@ static void adb_iop_start(void) adb_iop_complete); } -int adb_iop_probe(void) +static int adb_iop_probe(void) { if (!iop_ism_present) return -ENODEV; return 0; } -int adb_iop_init(void) +static int adb_iop_init(void) { pr_info("adb: IOP ISM driver v0.4 for Unified ADB\n"); iop_listen(ADB_IOP, ADB_CHAN, adb_iop_listen, "ADB"); return 0; } -int adb_iop_send_request(struct adb_request *req, int sync) +static int adb_iop_send_request(struct adb_request *req, int sync) { int err; @@ -211,7 +211,7 @@ static int adb_iop_write(struct adb_request *req) local_irq_save(flags); - if (current_req != 0) { + if (current_req) { last_req->next = req; last_req = req; } else { @@ -227,18 +227,18 @@ static int adb_iop_write(struct adb_request *req) return 0; } -int adb_iop_autopoll(int devs) +static int adb_iop_autopoll(int devs) { /* TODO: how do we enable/disable autopoll? */ return 0; } -void adb_iop_poll(void) +static void adb_iop_poll(void) { iop_ism_irq_poll(ADB_IOP); } -int adb_iop_reset_bus(void) +static int adb_iop_reset_bus(void) { struct adb_request req; -- cgit v1.2.3 From 32226e81704398317e1cc5a82d24c0ef3cc25e5e Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 31 May 2020 09:17:03 +1000 Subject: macintosh/adb-iop: Implement idle -> sending state transition In the present algorithm, the 'idle' state transition does not take place until there's a bus timeout. Once idle, the driver does not automatically proceed with the next request. Change the algorithm so that queued ADB requests will be sent as soon as the driver becomes idle. This is to take place after the current IOP message is completed. 
Signed-off-by: Finn Thain Tested-by: Stan Johnson Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/dedcdfc62f43e85cc4c2a8d211a7e2fec7bc7c1a.1590880623.git.fthain@telegraphics.com.au --- drivers/macintosh/adb-iop.c | 43 ++++++++++++++++++++++++------------------- 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c index a2b28e09f2ce..f22792c95d4f 100644 --- a/drivers/macintosh/adb-iop.c +++ b/drivers/macintosh/adb-iop.c @@ -54,13 +54,19 @@ struct adb_driver adb_iop_driver = { .reset_bus = adb_iop_reset_bus }; -static void adb_iop_end_req(struct adb_request *req, int state) +static void adb_iop_done(void) { + struct adb_request *req = current_req; + + adb_iop_state = idle; + req->complete = 1; current_req = req->next; if (req->done) (*req->done)(req); - adb_iop_state = state; + + if (adb_iop_state == idle) + adb_iop_start(); } /* @@ -94,37 +100,36 @@ static void adb_iop_complete(struct iop_msg *msg) static void adb_iop_listen(struct iop_msg *msg) { struct adb_iopmsg *amsg = (struct adb_iopmsg *)msg->message; - struct adb_request *req; unsigned long flags; + bool req_done = false; local_irq_save(flags); - req = current_req; - /* Handle a timeout. Timeout packets seem to occur even after * we've gotten a valid reply to a TALK, presumably because of * autopolling. */ - if (amsg->flags & ADB_IOP_TIMEOUT) { - msg->reply[0] = ADB_IOP_TIMEOUT | ADB_IOP_AUTOPOLL; - msg->reply[1] = 0; - msg->reply[2] = 0; - if (req && (adb_iop_state != idle)) { - adb_iop_end_req(req, idle); - } - } else { - if ((adb_iop_state == awaiting_reply) && - (amsg->flags & ADB_IOP_EXPLICIT)) { + if (amsg->flags & ADB_IOP_EXPLICIT) { + if (adb_iop_state == awaiting_reply) { + struct adb_request *req = current_req; + req->reply_len = amsg->count + 1; memcpy(req->reply, &amsg->cmd, req->reply_len); - } else { - adb_input(&amsg->cmd, amsg->count + 1, - amsg->flags & ADB_IOP_AUTOPOLL); + + req_done = true; } - memcpy(msg->reply, msg->message, IOP_MSG_LEN); + } else if (!(amsg->flags & ADB_IOP_TIMEOUT)) { + adb_input(&amsg->cmd, amsg->count + 1, + amsg->flags & ADB_IOP_AUTOPOLL); } + + msg->reply[0] = ADB_IOP_AUTOPOLL; iop_complete_message(msg); + + if (req_done) + adb_iop_done(); + local_irq_restore(flags); } -- cgit v1.2.3 From e2954e5f727fad126258e83259b513988973c166 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 31 May 2020 09:17:03 +1000 Subject: macintosh/adb-iop: Implement sending -> idle state transition On leaving the 'sending' state, proceed to the 'idle' state if no reply is expected. Drop redundant test for adb_iop_state == sending && current_req. 
Signed-off-by: Finn Thain Tested-by: Stan Johnson Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/6991996dd4aaf0b52cfd650172bf0f6fbe37a452.1590880623.git.fthain@telegraphics.com.au --- drivers/macintosh/adb-iop.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c index f22792c95d4f..8594e4f9a830 100644 --- a/drivers/macintosh/adb-iop.c +++ b/drivers/macintosh/adb-iop.c @@ -77,15 +77,14 @@ static void adb_iop_done(void) static void adb_iop_complete(struct iop_msg *msg) { - struct adb_request *req; unsigned long flags; local_irq_save(flags); - req = current_req; - if ((adb_iop_state == sending) && req && req->reply_expected) { + if (current_req->reply_expected) adb_iop_state = awaiting_reply; - } + else + adb_iop_done(); local_irq_restore(flags); } -- cgit v1.2.3 From c66da95a39ec2bb95544c3def974d96e8c178f57 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 31 May 2020 09:17:03 +1000 Subject: macintosh/adb-iop: Implement SRQ autopolling The adb_driver.autopoll method is needed during ADB bus scan and device address assignment. Implement this method so that the IOP's list of device addresses can be updated. When the list is empty, disable SRQ autopolling. Signed-off-by: Finn Thain Tested-by: Stan Johnson Acked-by: Geert Uytterhoeven Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/0fb7fdcd99d7820bb27faf1f27f7f6f1923914ef.1590880623.git.fthain@telegraphics.com.au --- arch/m68k/include/asm/adb_iop.h | 1 + drivers/macintosh/adb-iop.c | 32 ++++++++++++++++++++++++++------ 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/arch/m68k/include/asm/adb_iop.h b/arch/m68k/include/asm/adb_iop.h index 195d7fb1268c..6aecd020e2fc 100644 --- a/arch/m68k/include/asm/adb_iop.h +++ b/arch/m68k/include/asm/adb_iop.h @@ -29,6 +29,7 @@ #define ADB_IOP_EXPLICIT 0x80 /* nonzero if explicit command */ #define ADB_IOP_AUTOPOLL 0x40 /* auto/SRQ polling enabled */ +#define ADB_IOP_SET_AUTOPOLL 0x20 /* set autopoll device list */ #define ADB_IOP_SRQ 0x04 /* SRQ detected */ #define ADB_IOP_TIMEOUT 0x02 /* nonzero if timeout */ diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c index 8594e4f9a830..f3d1a460fbce 100644 --- a/drivers/macintosh/adb-iop.c +++ b/drivers/macintosh/adb-iop.c @@ -7,10 +7,6 @@ * 1999-07-01 (jmt) - First implementation for new driver architecture. * * 1999-07-31 (jmt) - First working version. - * - * TODO: - * - * o Implement SRQ handling. */ #include @@ -28,6 +24,7 @@ static struct adb_request *current_req; static struct adb_request *last_req; +static unsigned int autopoll_devs; static enum adb_iop_state { idle, @@ -123,7 +120,7 @@ static void adb_iop_listen(struct iop_msg *msg) amsg->flags & ADB_IOP_AUTOPOLL); } - msg->reply[0] = ADB_IOP_AUTOPOLL; + msg->reply[0] = autopoll_devs ? ADB_IOP_AUTOPOLL : 0; iop_complete_message(msg); if (req_done) @@ -231,9 +228,32 @@ static int adb_iop_write(struct adb_request *req) return 0; } +static void adb_iop_set_ap_complete(struct iop_msg *msg) +{ + struct adb_iopmsg *amsg = (struct adb_iopmsg *)msg->message; + + autopoll_devs = (amsg->data[1] << 8) | amsg->data[0]; +} + static int adb_iop_autopoll(int devs) { - /* TODO: how do we enable/disable autopoll? */ + struct adb_iopmsg amsg; + unsigned long flags; + unsigned int mask = (unsigned int)devs & 0xFFFE; + + local_irq_save(flags); + + amsg.flags = ADB_IOP_SET_AUTOPOLL | (mask ? 
ADB_IOP_AUTOPOLL : 0); + amsg.count = 2; + amsg.cmd = 0; + amsg.data[0] = mask & 0xFF; + amsg.data[1] = (mask >> 8) & 0xFF; + + iop_send_message(ADB_IOP, ADB_CHAN, NULL, sizeof(amsg), (__u8 *)&amsg, + adb_iop_set_ap_complete); + + local_irq_restore(flags); + return 0; } -- cgit v1.2.3 From 59ea38f6b3af5636edf541768a1ed721eeaca99e Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 28 Jun 2020 14:23:12 +1000 Subject: macintosh/via-macii: Access autopoll_devs when inside lock The interrupt handler should be excluded when accessing the autopoll_devs variable. Fixes: d95fd5fce88f0 ("m68k: Mac II ADB fixes") # v5.0+ Signed-off-by: Finn Thain Tested-by: Stan Johnson Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/5952dd8a9bc9de90f1acc4790c51dd42b4c98065.1593318192.git.fthain@telegraphics.com.au --- drivers/macintosh/via-macii.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c index ac824d7b2dcf..6aa903529570 100644 --- a/drivers/macintosh/via-macii.c +++ b/drivers/macintosh/via-macii.c @@ -270,15 +270,12 @@ static int macii_autopoll(int devs) unsigned long flags; int err = 0; + local_irq_save(flags); + /* bit 1 == device 1, and so on. */ autopoll_devs = devs & 0xFFFE; - if (!autopoll_devs) - return 0; - - local_irq_save(flags); - - if (current_req == NULL) { + if (autopoll_devs && !current_req) { /* Send a Talk Reg 0. The controller will repeatedly transmit * this as long as it is idle. */ -- cgit v1.2.3 From f93bfeb55255bddaa16597e187a99ae6131b964a Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 28 Jun 2020 14:23:12 +1000 Subject: macintosh/via-macii: Poll the device most likely to respond Poll the most recently polled device by default, rather than the lowest device address that happens to be enabled in autopoll_devs. This improves input latency. Re-use macii_queue_poll() rather than duplicate that logic. This eliminates a static struct and function. 
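The address-selection logic can be modelled outside the driver. The following stand-alone program is an illustration only (the function name and the simplified srq_seen flag are invented for the example); it shows how ffs() picks the next enabled address above the one last polled, wrapping around when nothing higher is enabled:

#include <stdio.h>
#include <strings.h>	/* ffs() */

static unsigned int next_poll_addr(unsigned int autopoll_devs,
				   unsigned int last_addr, int srq_seen)
{
	unsigned int higher;

	/* Keep polling the active device unless some other device asserted
	 * SRQ or the last address is no longer in the autopoll list.
	 */
	if (!srq_seen && (autopoll_devs & (1u << last_addr)))
		return last_addr;

	/* Otherwise take the next higher enabled address, wrapping around. */
	higher = autopoll_devs & -(1u << (last_addr + 1));
	return ffs(higher ? higher : autopoll_devs) - 1;
}

int main(void)
{
	/* Devices at ADB addresses 2 (keyboard) and 3 (mouse) enabled. */
	printf("%u\n", next_poll_addr(0x000C, 3, 1));	/* SRQ seen: poll address 2 */
	printf("%u\n", next_poll_addr(0x000C, 3, 0));	/* quiet bus: stay on 3 */
	return 0;
}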
Fixes: d95fd5fce88f0 ("m68k: Mac II ADB fixes") # v5.0+ Signed-off-by: Finn Thain Tested-by: Stan Johnson Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/5836f80886ebcfbe5be5fb7e0dc49feed6469712.1593318192.git.fthain@telegraphics.com.au --- drivers/macintosh/via-macii.c | 99 +++++++++++++++++++++++-------------------- 1 file changed, 53 insertions(+), 46 deletions(-) diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c index 6aa903529570..d4f1a65c5f1f 100644 --- a/drivers/macintosh/via-macii.c +++ b/drivers/macintosh/via-macii.c @@ -77,6 +77,10 @@ static volatile unsigned char *via; #define ST_ODD 0x20 /* ADB state: odd data byte */ #define ST_IDLE 0x30 /* ADB state: idle, nothing to send */ +/* ADB command byte structure */ +#define ADDR_MASK 0xF0 +#define CMD_MASK 0x0F + static int macii_init_via(void); static void macii_start(void); static irqreturn_t macii_interrupt(int irq, void *arg); @@ -117,7 +121,8 @@ static int reply_len; /* number of bytes received in reply_buf or req->reply */ static int status; /* VIA's ADB status bits captured upon interrupt */ static int last_status; /* status bits as at previous interrupt */ static int srq_asserted; /* have to poll for the device that asserted it */ -static int command_byte; /* the most recent command byte transmitted */ +static u8 last_cmd; /* the most recent command byte transmitted */ +static u8 last_poll_cmd; /* the most recent Talk R0 command byte transmitted */ static int autopoll_devs; /* bits set are device addresses to be polled */ /* Check for MacII style ADB */ @@ -179,35 +184,49 @@ static int macii_init_via(void) /* Send an ADB poll (Talk Register 0 command prepended to the request queue) */ static void macii_queue_poll(void) { - /* No point polling the active device as it will never assert SRQ, so - * poll the next device in the autopoll list. This could leave us - * stuck in a polling loop if an unprobed device is asserting SRQ. - * In theory, that could only happen if a device was plugged in after - * probing started. Unplugging it again will break the cycle. - * (Simply polling the next higher device often ends up polling almost - * every device (after wrapping around), which takes too long.) - */ - int device_mask; - int next_device; static struct adb_request req; + unsigned char poll_command; + unsigned int poll_addr; + /* This only polls devices in the autopoll list, which assumes that + * unprobed devices never assert SRQ. That could happen if a device was + * plugged in after the adb bus scan. Unplugging it again will resolve + * the problem. This behaviour is similar to MacOS. + */ if (!autopoll_devs) return; - device_mask = (1 << (((command_byte & 0xF0) >> 4) + 1)) - 1; - if (autopoll_devs & ~device_mask) - next_device = ffs(autopoll_devs & ~device_mask) - 1; - else - next_device = ffs(autopoll_devs) - 1; + /* The device most recently polled may not be the best device to poll + * right now. Some other device(s) may have signalled SRQ (the active + * device won't do that). Or the autopoll list may have been changed. + * Try polling the next higher address. + */ + poll_addr = (last_poll_cmd & ADDR_MASK) >> 4; + if ((srq_asserted && last_cmd == last_poll_cmd) || + !(autopoll_devs & (1 << poll_addr))) { + unsigned int higher_devs; + + higher_devs = autopoll_devs & -(1 << (poll_addr + 1)); + poll_addr = ffs(higher_devs ? 
higher_devs : autopoll_devs) - 1; + } - adb_request(&req, NULL, ADBREQ_NOSEND, 1, ADB_READREG(next_device, 0)); + /* Send a Talk Register 0 command */ + poll_command = ADB_READREG(poll_addr, 0); + + /* No need to repeat this Talk command. The transceiver will do that + * as long as it is idle. + */ + if (poll_command == last_cmd) + return; + + adb_request(&req, NULL, ADBREQ_NOSEND, 1, poll_command); req.sent = 0; req.complete = 0; req.reply_len = 0; req.next = current_req; - if (current_req != NULL) { + if (WARN_ON(current_req)) { current_req = &req; } else { current_req = &req; @@ -266,37 +285,22 @@ static int macii_write(struct adb_request *req) /* Start auto-polling */ static int macii_autopoll(int devs) { - static struct adb_request req; unsigned long flags; - int err = 0; local_irq_save(flags); /* bit 1 == device 1, and so on. */ autopoll_devs = devs & 0xFFFE; - if (autopoll_devs && !current_req) { - /* Send a Talk Reg 0. The controller will repeatedly transmit - * this as long as it is idle. - */ - adb_request(&req, NULL, ADBREQ_NOSEND, 1, - ADB_READREG(ffs(autopoll_devs) - 1, 0)); - err = macii_write(&req); + if (!current_req) { + macii_queue_poll(); + if (current_req && macii_state == idle) + macii_start(); } local_irq_restore(flags); - return err; -} -static inline int need_autopoll(void) -{ - /* Was the last command Talk Reg 0 - * and is the target on the autopoll list? - */ - if ((command_byte & 0x0F) == 0x0C && - ((1 << ((command_byte & 0xF0) >> 4)) & autopoll_devs)) - return 0; - return 1; + return 0; } /* Prod the chip without interrupts */ @@ -333,7 +337,12 @@ static void macii_start(void) */ /* store command byte */ - command_byte = req->data[1]; + last_cmd = req->data[1]; + + /* If this is a Talk Register 0 command, store the command byte */ + if ((last_cmd & CMD_MASK) == ADB_READREG(0, 0)) + last_poll_cmd = last_cmd; + /* Output mode */ via[ACR] |= SR_OUT; /* Load data */ @@ -424,10 +433,11 @@ static irqreturn_t macii_interrupt(int irq, void *arg) if (req->done) (*req->done)(req); - if (current_req) + if (!current_req) + macii_queue_poll(); + + if (current_req && macii_state == idle) macii_start(); - else if (need_autopoll()) - macii_autopoll(autopoll_devs); } if (macii_state == idle) { @@ -507,14 +517,11 @@ static irqreturn_t macii_interrupt(int irq, void *arg) macii_state = idle; - /* SRQ seen before, initiate poll now */ - if (srq_asserted) + if (!current_req) macii_queue_poll(); if (current_req) macii_start(); - else if (need_autopoll()) - macii_autopoll(autopoll_devs); if (macii_state == idle) via[B] = (via[B] & ~ST_MASK) | ST_IDLE; -- cgit v1.2.3 From b4d76c28eca369b8105fe3a0a9f396e3fbcd0dd5 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 28 Jun 2020 14:23:12 +1000 Subject: macintosh/via-macii: Handle /CTLR_IRQ signal correctly I'm told that the /CTLR_IRQ signal from the ADB transceiver gets interpreted by MacOS to mean SRQ, bus timeout or end-of-packet depending on the circumstances, and that Linux's via-macii driver does not correctly interpret this signal. Instead, the via-macii driver interprets certain received byte values (0x00 and 0xFF) as signalling end of packet and bus timeout (respectively). Problem is, those values can also appear under other circumstances. This patch changes the bus timeout, end of packet and SRQ detection logic to bring it closer to the logic that MacOS reportedly uses. 
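The reported interpretation can be summarised in a small sketch (simplified illustration only; the real handler also checks the EVEN/ODD transfer state, and "asserted" here means the active-low /CTLR_IRQ line reads low):

enum event { EV_MORE_DATA, EV_BUS_TIMEOUT, EV_SRQ, EV_END_OF_PACKET };

/* reply_len counts the bytes collected so far in this transfer. */
static enum event classify(int ctlr_irq_asserted, int reply_len)
{
	if (!ctlr_irq_asserted)
		return EV_MORE_DATA;	/* keep shifting bytes in */
	if (reply_len == 1)
		return EV_BUS_TIMEOUT;	/* the device sent no data */
	if (reply_len == 2)
		return EV_SRQ;		/* another device wants service */
	return EV_END_OF_PACKET;
}

int main(void)
{
	/* /CTLR_IRQ asserted right after the first byte: bus timeout. */
	return classify(1, 1) == EV_BUS_TIMEOUT ? 0 : 1;
}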
Fixes: 1da177e4c3f41 ("Linux-2.6.12-rc2") # v5.0+ Reported-by: Mark Cave-Ayland Signed-off-by: Finn Thain Tested-by: Stan Johnson Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/6541fda1d8db3ae87c3abe17d189a10dc96e2382.1593318192.git.fthain@telegraphics.com.au --- drivers/macintosh/via-macii.c | 166 ++++++++++++++++++++++++------------------ 1 file changed, 97 insertions(+), 69 deletions(-) diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c index d4f1a65c5f1f..6a5cd7de05ba 100644 --- a/drivers/macintosh/via-macii.c +++ b/drivers/macintosh/via-macii.c @@ -80,6 +80,8 @@ static volatile unsigned char *via; /* ADB command byte structure */ #define ADDR_MASK 0xF0 #define CMD_MASK 0x0F +#define OP_MASK 0x0C +#define TALK 0x0C static int macii_init_via(void); static void macii_start(void); @@ -119,9 +121,10 @@ static int reading_reply; /* store reply in reply_buf else req->reply */ static int data_index; /* index of the next byte to send from req->data */ static int reply_len; /* number of bytes received in reply_buf or req->reply */ static int status; /* VIA's ADB status bits captured upon interrupt */ -static int last_status; /* status bits as at previous interrupt */ -static int srq_asserted; /* have to poll for the device that asserted it */ +static bool bus_timeout; /* no data was sent by the device */ +static bool srq_asserted; /* have to poll for the device that asserted it */ static u8 last_cmd; /* the most recent command byte transmitted */ +static u8 last_talk_cmd; /* the most recent Talk command byte transmitted */ static u8 last_poll_cmd; /* the most recent Talk R0 command byte transmitted */ static int autopoll_devs; /* bits set are device addresses to be polled */ @@ -170,7 +173,6 @@ static int macii_init_via(void) /* Set up state: idle */ via[B] |= ST_IDLE; - last_status = via[B] & (ST_MASK | CTLR_IRQ); /* Shift register on input */ via[ACR] = (via[ACR] & ~SR_CTRL) | SR_EXT; @@ -336,13 +338,6 @@ static void macii_start(void) * And req->nbytes is the number of bytes of real data plus one. */ - /* store command byte */ - last_cmd = req->data[1]; - - /* If this is a Talk Register 0 command, store the command byte */ - if ((last_cmd & CMD_MASK) == ADB_READREG(0, 0)) - last_poll_cmd = last_cmd; - /* Output mode */ via[ACR] |= SR_OUT; /* Load data */ @@ -352,6 +347,9 @@ static void macii_start(void) macii_state = sending; data_index = 2; + + bus_timeout = false; + srq_asserted = false; } /* @@ -360,15 +358,17 @@ static void macii_start(void) * generating shift register interrupts (SR_INT) for us. This means there has * to be activity on the ADB bus. The chip will poll to achieve this. * - * The basic ADB state machine was left unchanged from the original MacII code - * by Alan Cox, which was based on the CUDA driver for PowerMac. - * The syntax of the ADB status lines is totally different on MacII, - * though. MacII uses the states Command -> Even -> Odd -> Even ->...-> Idle - * for sending and Idle -> Even -> Odd -> Even ->...-> Idle for receiving. - * Start and end of a receive packet are signalled by asserting /IRQ on the - * interrupt line (/IRQ means the CTLR_IRQ bit in port B; not to be confused - * with the VIA shift register interrupt. /IRQ never actually interrupts the - * processor, it's just an ordinary input.) + * The VIA Port B output signalling works as follows. After the ADB transceiver + * sees a transition on the PB4 and PB5 lines it will crank over the VIA shift + * register which eventually raises the SR_INT interrupt. 
The PB4/PB5 outputs + * are toggled with each byte as the ADB transaction progresses. + * + * Request with no reply expected (and empty transceiver buffer): + * CMD -> IDLE + * Request with expected reply packet (or with buffered autopoll packet): + * CMD -> EVEN -> ODD -> EVEN -> ... -> IDLE + * Unsolicited packet: + * IDLE -> EVEN -> ODD -> EVEN -> ... -> IDLE */ static irqreturn_t macii_interrupt(int irq, void *arg) { @@ -388,31 +388,31 @@ static irqreturn_t macii_interrupt(int irq, void *arg) } } - last_status = status; status = via[B] & (ST_MASK | CTLR_IRQ); switch (macii_state) { case idle: - if (reading_reply) { - reply_ptr = current_req->reply; - } else { - WARN_ON(current_req); - reply_ptr = reply_buf; - } + WARN_ON((status & ST_MASK) != ST_IDLE); + + reply_ptr = reply_buf; + reading_reply = 0; + + bus_timeout = false; + srq_asserted = false; x = via[SR]; - if ((status & CTLR_IRQ) && (x == 0xFF)) { - /* Bus timeout without SRQ sequence: - * data is "FF" while CTLR_IRQ is "H" + if (!(status & CTLR_IRQ)) { + /* /CTLR_IRQ asserted in idle state means we must + * read an autopoll reply from the transceiver buffer. */ - reply_len = 0; - srq_asserted = 0; - macii_state = read_done; - } else { macii_state = reading; *reply_ptr = x; reply_len = 1; + } else { + /* bus timeout */ + macii_state = read_done; + reply_len = 0; } /* set ADB state = even for first data byte */ @@ -421,13 +421,52 @@ static irqreturn_t macii_interrupt(int irq, void *arg) case sending: req = current_req; - if (data_index >= req->nbytes) { + + if (status == (ST_CMD | CTLR_IRQ)) { + /* /CTLR_IRQ de-asserted after the command byte means + * the host can continue with the transaction. + */ + + /* Store command byte */ + last_cmd = req->data[1]; + if ((last_cmd & OP_MASK) == TALK) { + last_talk_cmd = last_cmd; + if ((last_cmd & CMD_MASK) == ADB_READREG(0, 0)) + last_poll_cmd = last_cmd; + } + } + + if (status == ST_CMD) { + /* /CTLR_IRQ asserted after the command byte means we + * must read an autopoll reply. The first byte was + * lost because the shift register was an output. 
+ */ + macii_state = reading; + + reading_reply = 0; + reply_ptr = reply_buf; + *reply_ptr = last_talk_cmd; + reply_len = 1; + + /* reset to shift in */ + via[ACR] &= ~SR_OUT; + x = via[SR]; + } else if (data_index >= req->nbytes) { req->sent = 1; - macii_state = idle; if (req->reply_expected) { + macii_state = reading; + reading_reply = 1; + reply_ptr = req->reply; + *reply_ptr = req->data[1]; + reply_len = 1; + + via[ACR] &= ~SR_OUT; + x = via[SR]; } else { + macii_state = idle; + req->complete = 1; current_req = req->next; if (req->done) @@ -438,25 +477,26 @@ static irqreturn_t macii_interrupt(int irq, void *arg) if (current_req && macii_state == idle) macii_start(); - } - if (macii_state == idle) { - /* reset to shift in */ - via[ACR] &= ~SR_OUT; - x = via[SR]; - /* set ADB state idle - might get SRQ */ - via[B] = (via[B] & ~ST_MASK) | ST_IDLE; + if (macii_state == idle) { + /* reset to shift in */ + via[ACR] &= ~SR_OUT; + x = via[SR]; + /* set ADB state idle - might get SRQ */ + via[B] = (via[B] & ~ST_MASK) | ST_IDLE; + } + break; } } else { via[SR] = req->data[data_index++]; + } - if ((via[B] & ST_MASK) == ST_CMD) { - /* just sent the command byte, set to EVEN */ - via[B] = (via[B] & ~ST_MASK) | ST_EVEN; - } else { - /* invert state bits, toggle ODD/EVEN */ - via[B] ^= ST_MASK; - } + if ((via[B] & ST_MASK) == ST_CMD) { + /* just sent the command byte, set to EVEN */ + via[B] = (via[B] & ~ST_MASK) | ST_EVEN; + } else { + /* invert state bits, toggle ODD/EVEN */ + via[B] ^= ST_MASK; } break; @@ -465,28 +505,13 @@ static irqreturn_t macii_interrupt(int irq, void *arg) WARN_ON((status & ST_MASK) == ST_CMD || (status & ST_MASK) == ST_IDLE); - /* Bus timeout with SRQ sequence: - * data is "XX FF" while CTLR_IRQ is "L L" - * End of packet without SRQ sequence: - * data is "XX...YY 00" while CTLR_IRQ is "L...H L" - * End of packet SRQ sequence: - * data is "XX...YY 00" while CTLR_IRQ is "L...L L" - * (where XX is the first response byte and - * YY is the last byte of valid response data.) - */ - - srq_asserted = 0; if (!(status & CTLR_IRQ)) { - if (x == 0xFF) { - if (!(last_status & CTLR_IRQ)) { - macii_state = read_done; - reply_len = 0; - srq_asserted = 1; - } - } else if (x == 0x00) { + if (status == ST_EVEN && reply_len == 1) { + bus_timeout = true; + } else if (status == ST_ODD && reply_len == 2) { + srq_asserted = true; + } else { macii_state = read_done; - if (!(last_status & CTLR_IRQ)) - srq_asserted = 1; } } @@ -504,6 +529,9 @@ static irqreturn_t macii_interrupt(int irq, void *arg) case read_done: x = via[SR]; + if (bus_timeout) + reply_len = 0; + if (reading_reply) { reading_reply = 0; req = current_req; -- cgit v1.2.3 From b16b67689baa01a5616b651356df7ad3e47a8763 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 28 Jun 2020 14:23:12 +1000 Subject: macintosh/via-macii: Remove read_done state The driver state machine may enter the 'read_done' state when leaving the 'idle' or 'reading' state. This transition is pointless, as is the extra interrupt it requires. The interrupt is produced by the transceiver (even when it has no data to send) because an extra EVEN/ODD toggle was signalled by the driver. Drop the extra state to simplify the code. 
Fixes: 1da177e4c3f41 ("Linux-2.6.12-rc2") # v5.0+ Signed-off-by: Finn Thain Tested-by: Stan Johnson Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/0253194363af4426f9788796811a6a29fb87c713.1593318192.git.fthain@telegraphics.com.au --- drivers/macintosh/via-macii.c | 70 +++++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 42 deletions(-) diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c index 6a5cd7de05ba..d29c87943ca4 100644 --- a/drivers/macintosh/via-macii.c +++ b/drivers/macintosh/via-macii.c @@ -110,7 +110,6 @@ static enum macii_state { idle, sending, reading, - read_done, } macii_state; static struct adb_request *current_req; /* first request struct in the queue */ @@ -411,8 +410,8 @@ static irqreturn_t macii_interrupt(int irq, void *arg) reply_len = 1; } else { /* bus timeout */ - macii_state = read_done; reply_len = 0; + break; } /* set ADB state = even for first data byte */ @@ -471,20 +470,6 @@ static irqreturn_t macii_interrupt(int irq, void *arg) current_req = req->next; if (req->done) (*req->done)(req); - - if (!current_req) - macii_queue_poll(); - - if (current_req && macii_state == idle) - macii_start(); - - if (macii_state == idle) { - /* reset to shift in */ - via[ACR] &= ~SR_OUT; - x = via[SR]; - /* set ADB state idle - might get SRQ */ - via[B] = (via[B] & ~ST_MASK) | ST_IDLE; - } break; } } else { @@ -511,12 +496,28 @@ static irqreturn_t macii_interrupt(int irq, void *arg) } else if (status == ST_ODD && reply_len == 2) { srq_asserted = true; } else { - macii_state = read_done; + macii_state = idle; + + if (bus_timeout) + reply_len = 0; + + if (reading_reply) { + struct adb_request *req = current_req; + + req->reply_len = reply_len; + + req->complete = 1; + current_req = req->next; + if (req->done) + (*req->done)(req); + } else if (reply_len && autopoll_devs) { + adb_input(reply_buf, reply_len, 0); + } + break; } } - if (macii_state == reading && - reply_len < ARRAY_SIZE(reply_buf)) { + if (reply_len < ARRAY_SIZE(reply_buf)) { reply_ptr++; *reply_ptr = x; reply_len++; @@ -526,37 +527,22 @@ static irqreturn_t macii_interrupt(int irq, void *arg) via[B] ^= ST_MASK; break; - case read_done: - x = via[SR]; - - if (bus_timeout) - reply_len = 0; - - if (reading_reply) { - reading_reply = 0; - req = current_req; - req->reply_len = reply_len; - req->complete = 1; - current_req = req->next; - if (req->done) - (*req->done)(req); - } else if (reply_len && autopoll_devs) - adb_input(reply_buf, reply_len, 0); - - macii_state = idle; + default: + break; + } + if (macii_state == idle) { if (!current_req) macii_queue_poll(); if (current_req) macii_start(); - if (macii_state == idle) + if (macii_state == idle) { + via[ACR] &= ~SR_OUT; + x = via[SR]; via[B] = (via[B] & ~ST_MASK) | ST_IDLE; - break; - - default: - break; + } } local_irq_restore(flags); -- cgit v1.2.3 From 624cf5b538b507293ec761797bd8ce0702fefe64 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 28 Jun 2020 14:23:12 +1000 Subject: macintosh/via-macii: Handle poll replies correctly Userspace applications may use /dev/adb to send Talk requests. Such requests always have req->reply_expected == 1. The same is true of Talk requests sent by the kernel, except for poll requests queued internally by the via-macii driver. Those requests have req->reply_expected == 0. Consequently, poll reply packets get treated like autopoll reply packets. (It doesn't make sense to try to distinguish them.) 
Always enter 'reading' state after a poll request, so that the reply gets collected and passed to adb_input(), and none go missing. All Talk replies passed to adb_input() come from polling or autopolling, so call adb_input() with the autopoll parameter set to 1. Fixes: d95fd5fce88f0 ("m68k: Mac II ADB fixes") # v5.0+ Signed-off-by: Finn Thain Tested-by: Stan Johnson Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/754cddfa045e5bfa53e5da199831de02e7d2f27f.1593318192.git.fthain@telegraphics.com.au --- drivers/macintosh/via-macii.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c index d29c87943ca4..8d5ef77b4a43 100644 --- a/drivers/macintosh/via-macii.c +++ b/drivers/macintosh/via-macii.c @@ -463,6 +463,21 @@ static irqreturn_t macii_interrupt(int irq, void *arg) via[ACR] &= ~SR_OUT; x = via[SR]; + } else if ((req->data[1] & OP_MASK) == TALK) { + macii_state = reading; + + reading_reply = 0; + reply_ptr = reply_buf; + *reply_ptr = req->data[1]; + reply_len = 1; + + via[ACR] &= ~SR_OUT; + x = via[SR]; + + req->complete = 1; + current_req = req->next; + if (req->done) + (*req->done)(req); } else { macii_state = idle; @@ -510,8 +525,9 @@ static irqreturn_t macii_interrupt(int irq, void *arg) current_req = req->next; if (req->done) (*req->done)(req); - } else if (reply_len && autopoll_devs) { - adb_input(reply_buf, reply_len, 0); + } else if (reply_len && autopoll_devs && + reply_buf[0] == last_poll_cmd) { + adb_input(reply_buf, reply_len, 1); } break; } -- cgit v1.2.3 From f87a162572c9f7c839a207c7de6c73ffe54a777c Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 28 Jun 2020 14:23:12 +1000 Subject: macintosh/via-macii: Use bool type for reading_reply variable Signed-off-by: Finn Thain Tested-by: Stan Johnson Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/779551219a11b19e574dfcd87e4ef60af08c4fc3.1593318192.git.fthain@telegraphics.com.au --- drivers/macintosh/via-macii.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c index 8d5ef77b4a43..e143ddb81de3 100644 --- a/drivers/macintosh/via-macii.c +++ b/drivers/macintosh/via-macii.c @@ -116,7 +116,7 @@ static struct adb_request *current_req; /* first request struct in the queue */ static struct adb_request *last_req; /* last request struct in the queue */ static unsigned char reply_buf[16]; /* storage for autopolled replies */ static unsigned char *reply_ptr; /* next byte in reply_buf or req->reply */ -static int reading_reply; /* store reply in reply_buf else req->reply */ +static bool reading_reply; /* store reply in reply_buf else req->reply */ static int data_index; /* index of the next byte to send from req->data */ static int reply_len; /* number of bytes received in reply_buf or req->reply */ static int status; /* VIA's ADB status bits captured upon interrupt */ @@ -394,7 +394,7 @@ static irqreturn_t macii_interrupt(int irq, void *arg) WARN_ON((status & ST_MASK) != ST_IDLE); reply_ptr = reply_buf; - reading_reply = 0; + reading_reply = false; bus_timeout = false; srq_asserted = false; @@ -442,7 +442,7 @@ static irqreturn_t macii_interrupt(int irq, void *arg) */ macii_state = reading; - reading_reply = 0; + reading_reply = false; reply_ptr = reply_buf; *reply_ptr = last_talk_cmd; reply_len = 1; @@ -456,7 +456,7 @@ static irqreturn_t macii_interrupt(int irq, void *arg) if (req->reply_expected) { macii_state = reading; - 
reading_reply = 1; + reading_reply = true; reply_ptr = req->reply; *reply_ptr = req->data[1]; reply_len = 1; @@ -466,7 +466,7 @@ static irqreturn_t macii_interrupt(int irq, void *arg) } else if ((req->data[1] & OP_MASK) == TALK) { macii_state = reading; - reading_reply = 0; + reading_reply = false; reply_ptr = reply_buf; *reply_ptr = req->data[1]; reply_len = 1; -- cgit v1.2.3 From 5c0c15a1953a7de2878d7e6f5711fd3322b11faa Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 28 Jun 2020 14:23:12 +1000 Subject: macintosh/via-macii: Use unsigned type for autopoll_devs variable Signed-off-by: Finn Thain Tested-by: Stan Johnson Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/ca5be30ba745c08c2b7a1f0618f99c61b303e983.1593318192.git.fthain@telegraphics.com.au --- drivers/macintosh/via-macii.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c index e143ddb81de3..447273967e1e 100644 --- a/drivers/macintosh/via-macii.c +++ b/drivers/macintosh/via-macii.c @@ -125,7 +125,7 @@ static bool srq_asserted; /* have to poll for the device that asserted it */ static u8 last_cmd; /* the most recent command byte transmitted */ static u8 last_talk_cmd; /* the most recent Talk command byte transmitted */ static u8 last_poll_cmd; /* the most recent Talk R0 command byte transmitted */ -static int autopoll_devs; /* bits set are device addresses to be polled */ +static unsigned int autopoll_devs; /* bits set are device addresses to poll */ /* Check for MacII style ADB */ static int macii_probe(void) @@ -291,7 +291,7 @@ static int macii_autopoll(int devs) local_irq_save(flags); /* bit 1 == device 1, and so on. */ - autopoll_devs = devs & 0xFFFE; + autopoll_devs = (unsigned int)devs & 0xFFFE; if (!current_req) { macii_queue_poll(); -- cgit v1.2.3 From 046ace8256489f32740da07de55a913ca09ce5cf Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 28 Jun 2020 14:23:12 +1000 Subject: macintosh/via-macii: Use the stack for reset request storage The adb_request struct can be stored on the stack because the request is synchronous and is completed before the function returns. Signed-off-by: Finn Thain Tested-by: Stan Johnson Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/a40f80dde90991757007b6962c386a208c970586.1593318192.git.fthain@telegraphics.com.au --- drivers/macintosh/via-macii.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c index 447273967e1e..2f9be4ec7d34 100644 --- a/drivers/macintosh/via-macii.c +++ b/drivers/macintosh/via-macii.c @@ -313,7 +313,7 @@ static void macii_poll(void) /* Reset the bus */ static int macii_reset_bus(void) { - static struct adb_request req; + struct adb_request req; /* Command = 0, Address = ignored */ adb_request(&req, NULL, ADBREQ_NOSEND, 1, ADB_BUSRESET); -- cgit v1.2.3 From 3327e58a04501e06aa531cdb4044aab214a6254a Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 28 Jun 2020 14:23:12 +1000 Subject: macintosh/via-macii: Clarify definition of macii_init() The function prototype correctly specifies the 'static' storage class. Let the function definition match the declaration for better readability. 
Signed-off-by: Finn Thain Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/c025aed0b1506399b73ff1d1bfa40ed641fcb3e3.1593318192.git.fthain@telegraphics.com.au --- drivers/macintosh/via-macii.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c index 2f9be4ec7d34..060e03f2264b 100644 --- a/drivers/macintosh/via-macii.c +++ b/drivers/macintosh/via-macii.c @@ -140,7 +140,7 @@ static int macii_probe(void) } /* Initialize the driver */ -int macii_init(void) +static int macii_init(void) { unsigned long flags; int err; -- cgit v1.2.3 From dbce456280857f329af9069af5e48a9b6ebad146 Mon Sep 17 00:00:00 2001 From: Srikar Dronamraju Date: Fri, 24 Jul 2020 16:28:09 +0530 Subject: powerpc/numa: Limit possible nodes to within num_possible_nodes MAX_NUMNODES is the theoretical maximum number of nodes that is supported by the kernel. Device tree properties expose the number of possible nodes on the current platform. The kernel detects this and uses it for most of its resource allocations. If the platform later increases the number of nodes beyond what was already exposed, it may lead to inconsistencies. Hence limit it to the already exposed nodes. Suggested-by: Nathan Lynch Signed-off-by: Srikar Dronamraju Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724105809.24733-1-srikar@linux.vnet.ibm.com --- arch/powerpc/mm/numa.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index e437a9ac4956..92938393fec6 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -221,7 +221,8 @@ static void initialize_distance_lookup_table(int nid, } } -/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa +/* + * Returns nid in the range [0..nr_node_ids], or -1 if no useful NUMA * info is found. */ static int associativity_to_nid(const __be32 *associativity) @@ -235,7 +236,7 @@ static int associativity_to_nid(const __be32 *associativity) nid = of_read_number(&associativity[min_common_depth], 1); /* POWER4 LPAR uses 0xffff as invalid node */ - if (nid == 0xffff || nid >= MAX_NUMNODES) + if (nid == 0xffff || nid >= nr_node_ids) nid = NUMA_NO_NODE; if (nid > 0 && @@ -448,7 +449,7 @@ static int of_drconf_to_nid_single(struct drmem_lmb *lmb) index = lmb->aa_index * aa.array_sz + min_common_depth - 1; nid = of_read_number(&aa.arrays[index], 1); - if (nid == 0xffff || nid >= MAX_NUMNODES) + if (nid == 0xffff || nid >= nr_node_ids) nid = default_nid; if (nid > 0) { -- cgit v1.2.3 From 20d444d06f97504d165b08558678b4737dcefb02 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 24 Jul 2020 23:14:18 +1000 Subject: powerpc/pseries: Move some PAPR paravirt functions to their own file These functions will be used by the queued spinlock implementation, and may be useful elsewhere too, so move them out of spinlock.h. 
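The shared-processor yield protocol that these helpers wrap can be illustrated with a small userspace model (illustration only: the array below stands in for lppaca_of(cpu).yield_count, and the H_CONFER hcall is not modelled). The hypervisor bumps the per-vCPU yield count on every preemption and dispatch, so an odd value means the vCPU is currently preempted, and bit 0 is all that vcpu_is_preempted() has to test:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t yield_count[4];		/* stand-in for the lppaca yield counts */

static bool vcpu_is_preempted(int cpu)
{
	return yield_count[cpu] & 1;	/* odd: preempted, even: running */
}

int main(void)
{
	yield_count[2] = 7;	/* vCPU 2 preempted */
	yield_count[3] = 8;	/* vCPU 3 running */

	printf("cpu2 preempted: %d\n", vcpu_is_preempted(2));
	printf("cpu3 preempted: %d\n", vcpu_is_preempted(3));
	return 0;
}

yield_to_preempted() passes along the yield count that was observed, so the hypervisor can ignore the confer if the target vCPU has already been dispatched again in the meantime.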
Signed-off-by: Nicholas Piggin Acked-by: Waiman Long Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724131423.1362108-2-npiggin@gmail.com --- arch/powerpc/include/asm/paravirt.h | 59 +++++++++++++++++++++++++++++++++++++ arch/powerpc/include/asm/spinlock.h | 24 +-------------- arch/powerpc/lib/locks.c | 12 ++++---- 3 files changed, 66 insertions(+), 29 deletions(-) create mode 100644 arch/powerpc/include/asm/paravirt.h diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h new file mode 100644 index 000000000000..339e8533464b --- /dev/null +++ b/arch/powerpc/include/asm/paravirt.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _ASM_POWERPC_PARAVIRT_H +#define _ASM_POWERPC_PARAVIRT_H + +#include +#include +#ifdef CONFIG_PPC64 +#include +#include +#endif + +#ifdef CONFIG_PPC_SPLPAR +DECLARE_STATIC_KEY_FALSE(shared_processor); + +static inline bool is_shared_processor(void) +{ + return static_branch_unlikely(&shared_processor); +} + +/* If bit 0 is set, the cpu has been preempted */ +static inline u32 yield_count_of(int cpu) +{ + __be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count); + return be32_to_cpu(yield_count); +} + +static inline void yield_to_preempted(int cpu, u32 yield_count) +{ + plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count); +} +#else +static inline bool is_shared_processor(void) +{ + return false; +} + +static inline u32 yield_count_of(int cpu) +{ + return 0; +} + +extern void ___bad_yield_to_preempted(void); +static inline void yield_to_preempted(int cpu, u32 yield_count) +{ + ___bad_yield_to_preempted(); /* This would be a bug */ +} +#endif + +#define vcpu_is_preempted vcpu_is_preempted +static inline bool vcpu_is_preempted(int cpu) +{ + if (!is_shared_processor()) + return false; + if (yield_count_of(cpu) & 1) + return true; + return false; +} + +#endif /* _ASM_POWERPC_PARAVIRT_H */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 2d620896cdae..79be9bb10bbb 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -15,11 +15,10 @@ * * (the type definitions are in asm/spinlock_types.h) */ -#include #include +#include #ifdef CONFIG_PPC64 #include -#include #endif #include #include @@ -35,18 +34,6 @@ #define LOCK_TOKEN 1 #endif -#ifdef CONFIG_PPC_PSERIES -DECLARE_STATIC_KEY_FALSE(shared_processor); - -#define vcpu_is_preempted vcpu_is_preempted -static inline bool vcpu_is_preempted(int cpu) -{ - if (!static_branch_unlikely(&shared_processor)) - return false; - return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1); -} -#endif - static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) { return lock.slock == 0; @@ -110,15 +97,6 @@ static inline void splpar_spin_yield(arch_spinlock_t *lock) {}; static inline void splpar_rw_yield(arch_rwlock_t *lock) {}; #endif -static inline bool is_shared_processor(void) -{ -#ifdef CONFIG_PPC_SPLPAR - return static_branch_unlikely(&shared_processor); -#else - return false; -#endif -} - static inline void spin_yield(arch_spinlock_t *lock) { if (is_shared_processor()) diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c index 6440d5943c00..04165b7a163f 100644 --- a/arch/powerpc/lib/locks.c +++ b/arch/powerpc/lib/locks.c @@ -27,14 +27,14 @@ void splpar_spin_yield(arch_spinlock_t *lock) return; holder_cpu = lock_value & 0xffff; BUG_ON(holder_cpu >= NR_CPUS); - yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count); + + 
yield_count = yield_count_of(holder_cpu); if ((yield_count & 1) == 0) return; /* virtual cpu is currently running */ rmb(); if (lock->slock != lock_value) return; /* something has changed */ - plpar_hcall_norets(H_CONFER, - get_hard_smp_processor_id(holder_cpu), yield_count); + yield_to_preempted(holder_cpu, yield_count); } EXPORT_SYMBOL_GPL(splpar_spin_yield); @@ -53,13 +53,13 @@ void splpar_rw_yield(arch_rwlock_t *rw) return; /* no write lock at present */ holder_cpu = lock_value & 0xffff; BUG_ON(holder_cpu >= NR_CPUS); - yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count); + + yield_count = yield_count_of(holder_cpu); if ((yield_count & 1) == 0) return; /* virtual cpu is currently running */ rmb(); if (rw->lock != lock_value) return; /* something has changed */ - plpar_hcall_norets(H_CONFER, - get_hard_smp_processor_id(holder_cpu), yield_count); + yield_to_preempted(holder_cpu, yield_count); } #endif -- cgit v1.2.3 From 12d0b9d6c843e7dbe739ebefcf16c7e4a45e4e78 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 24 Jul 2020 23:14:19 +1000 Subject: powerpc: Move spinlock implementation to simple_spinlock To prepare for queued spinlocks. This is a simple rename except to update preprocessor guard name and a file reference. Signed-off-by: Nicholas Piggin Acked-by: Waiman Long Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724131423.1362108-3-npiggin@gmail.com --- arch/powerpc/include/asm/simple_spinlock.h | 288 +++++++++++++++++++++++ arch/powerpc/include/asm/simple_spinlock_types.h | 21 ++ arch/powerpc/include/asm/spinlock.h | 285 +--------------------- arch/powerpc/include/asm/spinlock_types.h | 12 +- 4 files changed, 311 insertions(+), 295 deletions(-) create mode 100644 arch/powerpc/include/asm/simple_spinlock.h create mode 100644 arch/powerpc/include/asm/simple_spinlock_types.h diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h new file mode 100644 index 000000000000..9c3c30534333 --- /dev/null +++ b/arch/powerpc/include/asm/simple_spinlock.h @@ -0,0 +1,288 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_H +#define _ASM_POWERPC_SIMPLE_SPINLOCK_H + +/* + * Simple spin lock operations. + * + * Copyright (C) 2001-2004 Paul Mackerras , IBM + * Copyright (C) 2001 Anton Blanchard , IBM + * Copyright (C) 2002 Dave Engebretsen , IBM + * Rework to support virtual processors + * + * Type of int is used as a full 64b word is not necessary. + * + * (the type definitions are in asm/simple_spinlock_types.h) + */ +#include +#include +#include +#include +#include + +#ifdef CONFIG_PPC64 +/* use 0x800000yy when locked, where yy == CPU number */ +#ifdef __BIG_ENDIAN__ +#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token)) +#else +#define LOCK_TOKEN (*(u32 *)(&get_paca()->paca_index)) +#endif +#else +#define LOCK_TOKEN 1 +#endif + +static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) +{ + return lock.slock == 0; +} + +static inline int arch_spin_is_locked(arch_spinlock_t *lock) +{ + smp_mb(); + return !arch_spin_value_unlocked(*lock); +} + +/* + * This returns the old value in the lock, so we succeeded + * in getting the lock if the return value is 0. + */ +static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) +{ + unsigned long tmp, token; + + token = LOCK_TOKEN; + __asm__ __volatile__( +"1: " PPC_LWARX(%0,0,%2,1) "\n\ + cmpwi 0,%0,0\n\ + bne- 2f\n\ + stwcx. 
%1,0,%2\n\ + bne- 1b\n" + PPC_ACQUIRE_BARRIER +"2:" + : "=&r" (tmp) + : "r" (token), "r" (&lock->slock) + : "cr0", "memory"); + + return tmp; +} + +static inline int arch_spin_trylock(arch_spinlock_t *lock) +{ + return __arch_spin_trylock(lock) == 0; +} + +/* + * On a system with shared processors (that is, where a physical + * processor is multiplexed between several virtual processors), + * there is no point spinning on a lock if the holder of the lock + * isn't currently scheduled on a physical processor. Instead + * we detect this situation and ask the hypervisor to give the + * rest of our timeslice to the lock holder. + * + * So that we can tell which virtual processor is holding a lock, + * we put 0x80000000 | smp_processor_id() in the lock when it is + * held. Conveniently, we have a word in the paca that holds this + * value. + */ + +#if defined(CONFIG_PPC_SPLPAR) +/* We only yield to the hypervisor if we are in shared processor mode */ +void splpar_spin_yield(arch_spinlock_t *lock); +void splpar_rw_yield(arch_rwlock_t *lock); +#else /* SPLPAR */ +static inline void splpar_spin_yield(arch_spinlock_t *lock) {}; +static inline void splpar_rw_yield(arch_rwlock_t *lock) {}; +#endif + +static inline void spin_yield(arch_spinlock_t *lock) +{ + if (is_shared_processor()) + splpar_spin_yield(lock); + else + barrier(); +} + +static inline void rw_yield(arch_rwlock_t *lock) +{ + if (is_shared_processor()) + splpar_rw_yield(lock); + else + barrier(); +} + +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + while (1) { + if (likely(__arch_spin_trylock(lock) == 0)) + break; + do { + HMT_low(); + if (is_shared_processor()) + splpar_spin_yield(lock); + } while (unlikely(lock->slock != 0)); + HMT_medium(); + } +} + +static inline +void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) +{ + unsigned long flags_dis; + + while (1) { + if (likely(__arch_spin_trylock(lock) == 0)) + break; + local_save_flags(flags_dis); + local_irq_restore(flags); + do { + HMT_low(); + if (is_shared_processor()) + splpar_spin_yield(lock); + } while (unlikely(lock->slock != 0)); + HMT_medium(); + local_irq_restore(flags_dis); + } +} +#define arch_spin_lock_flags arch_spin_lock_flags + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + __asm__ __volatile__("# arch_spin_unlock\n\t" + PPC_RELEASE_BARRIER: : :"memory"); + lock->slock = 0; +} + +/* + * Read-write spinlocks, allowing multiple readers + * but only one writer. + * + * NOTE! it is quite common to have readers in interrupts + * but no interrupt writers. For those circumstances we + * can "mix" irq-safe locks - any writer needs to get a + * irq-safe write-lock, but readers can get non-irqsafe + * read-locks. + */ + +#ifdef CONFIG_PPC64 +#define __DO_SIGN_EXTEND "extsw %0,%0\n" +#define WRLOCK_TOKEN LOCK_TOKEN /* it's negative */ +#else +#define __DO_SIGN_EXTEND +#define WRLOCK_TOKEN (-1) +#endif + +/* + * This returns the old value in the lock + 1, + * so we got a read lock if the return value is > 0. + */ +static inline long __arch_read_trylock(arch_rwlock_t *rw) +{ + long tmp; + + __asm__ __volatile__( +"1: " PPC_LWARX(%0,0,%1,1) "\n" + __DO_SIGN_EXTEND +" addic. %0,%0,1\n\ + ble- 2f\n" +" stwcx. %0,0,%1\n\ + bne- 1b\n" + PPC_ACQUIRE_BARRIER +"2:" : "=&r" (tmp) + : "r" (&rw->lock) + : "cr0", "xer", "memory"); + + return tmp; +} + +/* + * This returns the old value in the lock, + * so we got the write lock if the return value is 0. 
+ */ +static inline long __arch_write_trylock(arch_rwlock_t *rw) +{ + long tmp, token; + + token = WRLOCK_TOKEN; + __asm__ __volatile__( +"1: " PPC_LWARX(%0,0,%2,1) "\n\ + cmpwi 0,%0,0\n\ + bne- 2f\n" +" stwcx. %1,0,%2\n\ + bne- 1b\n" + PPC_ACQUIRE_BARRIER +"2:" : "=&r" (tmp) + : "r" (token), "r" (&rw->lock) + : "cr0", "memory"); + + return tmp; +} + +static inline void arch_read_lock(arch_rwlock_t *rw) +{ + while (1) { + if (likely(__arch_read_trylock(rw) > 0)) + break; + do { + HMT_low(); + if (is_shared_processor()) + splpar_rw_yield(rw); + } while (unlikely(rw->lock < 0)); + HMT_medium(); + } +} + +static inline void arch_write_lock(arch_rwlock_t *rw) +{ + while (1) { + if (likely(__arch_write_trylock(rw) == 0)) + break; + do { + HMT_low(); + if (is_shared_processor()) + splpar_rw_yield(rw); + } while (unlikely(rw->lock != 0)); + HMT_medium(); + } +} + +static inline int arch_read_trylock(arch_rwlock_t *rw) +{ + return __arch_read_trylock(rw) > 0; +} + +static inline int arch_write_trylock(arch_rwlock_t *rw) +{ + return __arch_write_trylock(rw) == 0; +} + +static inline void arch_read_unlock(arch_rwlock_t *rw) +{ + long tmp; + + __asm__ __volatile__( + "# read_unlock\n\t" + PPC_RELEASE_BARRIER +"1: lwarx %0,0,%1\n\ + addic %0,%0,-1\n" +" stwcx. %0,0,%1\n\ + bne- 1b" + : "=&r"(tmp) + : "r"(&rw->lock) + : "cr0", "xer", "memory"); +} + +static inline void arch_write_unlock(arch_rwlock_t *rw) +{ + __asm__ __volatile__("# write_unlock\n\t" + PPC_RELEASE_BARRIER: : :"memory"); + rw->lock = 0; +} + +#define arch_spin_relax(lock) spin_yield(lock) +#define arch_read_relax(lock) rw_yield(lock) +#define arch_write_relax(lock) rw_yield(lock) + +/* See include/linux/spinlock.h */ +#define smp_mb__after_spinlock() smp_mb() + +#endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_H */ diff --git a/arch/powerpc/include/asm/simple_spinlock_types.h b/arch/powerpc/include/asm/simple_spinlock_types.h new file mode 100644 index 000000000000..0f3cdd8faa95 --- /dev/null +++ b/arch/powerpc/include/asm/simple_spinlock_types.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H +#define _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +# error "please don't include this file directly" +#endif + +typedef struct { + volatile unsigned int slock; +} arch_spinlock_t; + +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } + +typedef struct { + volatile signed int lock; +} arch_rwlock_t; + +#define __ARCH_RW_LOCK_UNLOCKED { 0 } + +#endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 79be9bb10bbb..21357fe05fe0 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -3,290 +3,7 @@ #define __ASM_SPINLOCK_H #ifdef __KERNEL__ -/* - * Simple spin lock operations. - * - * Copyright (C) 2001-2004 Paul Mackerras , IBM - * Copyright (C) 2001 Anton Blanchard , IBM - * Copyright (C) 2002 Dave Engebretsen , IBM - * Rework to support virtual processors - * - * Type of int is used as a full 64b word is not necessary. 
- * - * (the type definitions are in asm/spinlock_types.h) - */ -#include -#include -#ifdef CONFIG_PPC64 -#include -#endif -#include -#include - -#ifdef CONFIG_PPC64 -/* use 0x800000yy when locked, where yy == CPU number */ -#ifdef __BIG_ENDIAN__ -#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token)) -#else -#define LOCK_TOKEN (*(u32 *)(&get_paca()->paca_index)) -#endif -#else -#define LOCK_TOKEN 1 -#endif - -static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) -{ - return lock.slock == 0; -} - -static inline int arch_spin_is_locked(arch_spinlock_t *lock) -{ - smp_mb(); - return !arch_spin_value_unlocked(*lock); -} - -/* - * This returns the old value in the lock, so we succeeded - * in getting the lock if the return value is 0. - */ -static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) -{ - unsigned long tmp, token; - - token = LOCK_TOKEN; - __asm__ __volatile__( -"1: " PPC_LWARX(%0,0,%2,1) "\n\ - cmpwi 0,%0,0\n\ - bne- 2f\n\ - stwcx. %1,0,%2\n\ - bne- 1b\n" - PPC_ACQUIRE_BARRIER -"2:" - : "=&r" (tmp) - : "r" (token), "r" (&lock->slock) - : "cr0", "memory"); - - return tmp; -} - -static inline int arch_spin_trylock(arch_spinlock_t *lock) -{ - return __arch_spin_trylock(lock) == 0; -} - -/* - * On a system with shared processors (that is, where a physical - * processor is multiplexed between several virtual processors), - * there is no point spinning on a lock if the holder of the lock - * isn't currently scheduled on a physical processor. Instead - * we detect this situation and ask the hypervisor to give the - * rest of our timeslice to the lock holder. - * - * So that we can tell which virtual processor is holding a lock, - * we put 0x80000000 | smp_processor_id() in the lock when it is - * held. Conveniently, we have a word in the paca that holds this - * value. - */ - -#if defined(CONFIG_PPC_SPLPAR) -/* We only yield to the hypervisor if we are in shared processor mode */ -void splpar_spin_yield(arch_spinlock_t *lock); -void splpar_rw_yield(arch_rwlock_t *lock); -#else /* SPLPAR */ -static inline void splpar_spin_yield(arch_spinlock_t *lock) {}; -static inline void splpar_rw_yield(arch_rwlock_t *lock) {}; -#endif - -static inline void spin_yield(arch_spinlock_t *lock) -{ - if (is_shared_processor()) - splpar_spin_yield(lock); - else - barrier(); -} - -static inline void rw_yield(arch_rwlock_t *lock) -{ - if (is_shared_processor()) - splpar_rw_yield(lock); - else - barrier(); -} - -static inline void arch_spin_lock(arch_spinlock_t *lock) -{ - while (1) { - if (likely(__arch_spin_trylock(lock) == 0)) - break; - do { - HMT_low(); - if (is_shared_processor()) - splpar_spin_yield(lock); - } while (unlikely(lock->slock != 0)); - HMT_medium(); - } -} - -static inline -void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) -{ - unsigned long flags_dis; - - while (1) { - if (likely(__arch_spin_trylock(lock) == 0)) - break; - local_save_flags(flags_dis); - local_irq_restore(flags); - do { - HMT_low(); - if (is_shared_processor()) - splpar_spin_yield(lock); - } while (unlikely(lock->slock != 0)); - HMT_medium(); - local_irq_restore(flags_dis); - } -} -#define arch_spin_lock_flags arch_spin_lock_flags - -static inline void arch_spin_unlock(arch_spinlock_t *lock) -{ - __asm__ __volatile__("# arch_spin_unlock\n\t" - PPC_RELEASE_BARRIER: : :"memory"); - lock->slock = 0; -} - -/* - * Read-write spinlocks, allowing multiple readers - * but only one writer. - * - * NOTE! it is quite common to have readers in interrupts - * but no interrupt writers. 
For those circumstances we - * can "mix" irq-safe locks - any writer needs to get a - * irq-safe write-lock, but readers can get non-irqsafe - * read-locks. - */ - -#ifdef CONFIG_PPC64 -#define __DO_SIGN_EXTEND "extsw %0,%0\n" -#define WRLOCK_TOKEN LOCK_TOKEN /* it's negative */ -#else -#define __DO_SIGN_EXTEND -#define WRLOCK_TOKEN (-1) -#endif - -/* - * This returns the old value in the lock + 1, - * so we got a read lock if the return value is > 0. - */ -static inline long __arch_read_trylock(arch_rwlock_t *rw) -{ - long tmp; - - __asm__ __volatile__( -"1: " PPC_LWARX(%0,0,%1,1) "\n" - __DO_SIGN_EXTEND -" addic. %0,%0,1\n\ - ble- 2f\n" -" stwcx. %0,0,%1\n\ - bne- 1b\n" - PPC_ACQUIRE_BARRIER -"2:" : "=&r" (tmp) - : "r" (&rw->lock) - : "cr0", "xer", "memory"); - - return tmp; -} - -/* - * This returns the old value in the lock, - * so we got the write lock if the return value is 0. - */ -static inline long __arch_write_trylock(arch_rwlock_t *rw) -{ - long tmp, token; - - token = WRLOCK_TOKEN; - __asm__ __volatile__( -"1: " PPC_LWARX(%0,0,%2,1) "\n\ - cmpwi 0,%0,0\n\ - bne- 2f\n" -" stwcx. %1,0,%2\n\ - bne- 1b\n" - PPC_ACQUIRE_BARRIER -"2:" : "=&r" (tmp) - : "r" (token), "r" (&rw->lock) - : "cr0", "memory"); - - return tmp; -} - -static inline void arch_read_lock(arch_rwlock_t *rw) -{ - while (1) { - if (likely(__arch_read_trylock(rw) > 0)) - break; - do { - HMT_low(); - if (is_shared_processor()) - splpar_rw_yield(rw); - } while (unlikely(rw->lock < 0)); - HMT_medium(); - } -} - -static inline void arch_write_lock(arch_rwlock_t *rw) -{ - while (1) { - if (likely(__arch_write_trylock(rw) == 0)) - break; - do { - HMT_low(); - if (is_shared_processor()) - splpar_rw_yield(rw); - } while (unlikely(rw->lock != 0)); - HMT_medium(); - } -} - -static inline int arch_read_trylock(arch_rwlock_t *rw) -{ - return __arch_read_trylock(rw) > 0; -} - -static inline int arch_write_trylock(arch_rwlock_t *rw) -{ - return __arch_write_trylock(rw) == 0; -} - -static inline void arch_read_unlock(arch_rwlock_t *rw) -{ - long tmp; - - __asm__ __volatile__( - "# read_unlock\n\t" - PPC_RELEASE_BARRIER -"1: lwarx %0,0,%1\n\ - addic %0,%0,-1\n" -" stwcx. 
%0,0,%1\n\ - bne- 1b" - : "=&r"(tmp) - : "r"(&rw->lock) - : "cr0", "xer", "memory"); -} - -static inline void arch_write_unlock(arch_rwlock_t *rw) -{ - __asm__ __volatile__("# write_unlock\n\t" - PPC_RELEASE_BARRIER: : :"memory"); - rw->lock = 0; -} - -#define arch_spin_relax(lock) spin_yield(lock) -#define arch_read_relax(lock) rw_yield(lock) -#define arch_write_relax(lock) rw_yield(lock) - -/* See include/linux/spinlock.h */ -#define smp_mb__after_spinlock() smp_mb() +#include #endif /* __KERNEL__ */ #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h index 87adaf13b7e8..3906f52dae65 100644 --- a/arch/powerpc/include/asm/spinlock_types.h +++ b/arch/powerpc/include/asm/spinlock_types.h @@ -6,16 +6,6 @@ # error "please don't include this file directly" #endif -typedef struct { - volatile unsigned int slock; -} arch_spinlock_t; - -#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } - -typedef struct { - volatile signed int lock; -} arch_rwlock_t; - -#define __ARCH_RW_LOCK_UNLOCKED { 0 } +#include #endif -- cgit v1.2.3 From aa65ff6b18e0366db1790609956a4ac7308c5668 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 24 Jul 2020 23:14:20 +1000 Subject: powerpc/64s: Implement queued spinlocks and rwlocks These have shown significantly improved performance and fairness when spinlock contention is moderate to high on very large systems. With this series including subsequent patches, on a 16 socket 1536 thread POWER9, a stress test such as same-file open/close from all CPUs gets big speedups, 11620op/s aggregate with simple spinlocks vs 384158op/s (33x faster), where the difference in throughput between the fastest and slowest thread goes from 7x to 1.4x. Thanks to the fast path being identical in terms of atomics and barriers (after a subsequent optimisation patch), single threaded performance is not changed (no measurable difference). On smaller systems, performance and fairness seems to be generally improved. Using dbench on tmpfs as a test (that starts to run into kernel spinlock contention), a 2-socket OpenPOWER POWER9 system was tested with bare metal and KVM guest configurations. Results can be found here: https://github.com/linuxppc/issues/issues/305#issuecomment-663487453 Observations are: - Queued spinlocks are equal when contention is insignificant, as expected and as measured with microbenchmarks. - When there is contention, on bare metal queued spinlocks have better throughput and max latency at all points. - When virtualised, queued spinlocks are slightly worse approaching peak throughput, but significantly better throughput and max latency at all points beyond peak, until queued spinlock maximum latency rises when clients are 2x vCPUs. The regressions haven't been analysed very well yet, there are a lot of things that can be tuned, particularly the paravirtualised locking, but the numbers already look like a good net win even on relatively small systems. 
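The claim about the fast path can be seen in a compact model (illustration only, using C11 atomics in userspace rather than the kernel implementation): the uncontended acquisition is a single acquire compare-and-swap of 0 -> locked, which is why single-threaded behaviour matches the simple spinlock, while contended callers fall into a slow path that queues them (not modelled here):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define Q_LOCKED_VAL 1u

struct qspinlock {
	atomic_uint val;
};

static bool fastpath_trylock(struct qspinlock *lock)
{
	unsigned int old = 0;

	/* Acquire semantics on success, no ordering required on failure. */
	return atomic_compare_exchange_strong_explicit(&lock->val, &old,
			Q_LOCKED_VAL, memory_order_acquire, memory_order_relaxed);
}

int main(void)
{
	struct qspinlock lock = { 0 };

	printf("first acquire: %d\n", fastpath_trylock(&lock));	/* succeeds */
	printf("second acquire: %d\n", fastpath_trylock(&lock));	/* would queue */
	return 0;
}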
Signed-off-by: Nicholas Piggin Acked-by: Peter Zijlstra (Intel) Acked-by: Waiman Long Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724131423.1362108-4-npiggin@gmail.com --- arch/powerpc/Kconfig | 15 +++++++++++++++ arch/powerpc/include/asm/Kbuild | 1 + arch/powerpc/include/asm/qspinlock.h | 25 +++++++++++++++++++++++++ arch/powerpc/include/asm/spinlock.h | 5 +++++ arch/powerpc/include/asm/spinlock_types.h | 5 +++++ arch/powerpc/lib/Makefile | 3 +++ include/asm-generic/qspinlock.h | 2 ++ 7 files changed, 56 insertions(+) create mode 100644 arch/powerpc/include/asm/qspinlock.h diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 81c0dee1cbff..a751edacf4bc 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -146,6 +146,8 @@ config PPC select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF if PPC64 + select ARCH_USE_QUEUED_RWLOCKS if PPC_QUEUED_SPINLOCKS + select ARCH_USE_QUEUED_SPINLOCKS if PPC_QUEUED_SPINLOCKS select ARCH_WANT_IPC_PARSE_VERSION select ARCH_WEAK_RELEASE_ACQUIRE select BINFMT_ELF @@ -491,6 +493,19 @@ config HOTPLUG_CPU Say N if you are unsure. +config PPC_QUEUED_SPINLOCKS + bool "Queued spinlocks" + depends on SMP + help + Say Y here to use queued spinlocks which give better scalability and + fairness on large SMP and NUMA systems without harming single threaded + performance. + + This option is currently experimental, the code is more complex and + less tested so it defaults to "N" for the moment. + + If unsure, say "N". + config ARCH_CPU_PROBE_RELEASE def_bool y depends on HOTPLUG_CPU diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index dadbcf3a0b1e..27c2268dfd6c 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -6,5 +6,6 @@ generated-y += syscall_table_spu.h generic-y += export.h generic-y += local64.h generic-y += mcs_spinlock.h +generic-y += qrwlock.h generic-y += vtime.h generic-y += early_ioremap.h diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h new file mode 100644 index 000000000000..c49e33e24edd --- /dev/null +++ b/arch/powerpc/include/asm/qspinlock.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_QSPINLOCK_H +#define _ASM_POWERPC_QSPINLOCK_H + +#include + +#define _Q_PENDING_LOOPS (1 << 9) /* not tuned */ + +#define smp_mb__after_spinlock() smp_mb() + +static __always_inline int queued_spin_is_locked(struct qspinlock *lock) +{ + /* + * This barrier was added to simple spinlocks by commit 51d7d5205d338, + * but it should now be possible to remove it, asm arm64 has done with + * commit c6f5d02b6a0f. 
+ */ + smp_mb(); + return atomic_read(&lock->val); +} +#define queued_spin_is_locked queued_spin_is_locked + +#include + +#endif /* _ASM_POWERPC_QSPINLOCK_H */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 21357fe05fe0..434615f1d761 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -3,7 +3,12 @@ #define __ASM_SPINLOCK_H #ifdef __KERNEL__ +#ifdef CONFIG_PPC_QUEUED_SPINLOCKS +#include +#include +#else #include +#endif #endif /* __KERNEL__ */ #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h index 3906f52dae65..c5d742f18021 100644 --- a/arch/powerpc/include/asm/spinlock_types.h +++ b/arch/powerpc/include/asm/spinlock_types.h @@ -6,6 +6,11 @@ # error "please don't include this file directly" #endif +#ifdef CONFIG_PPC_QUEUED_SPINLOCKS +#include +#include +#else #include +#endif #endif diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 5e994cda8e40..d66a645503eb 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -41,7 +41,10 @@ obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \ obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \ memcpy_64.o memcpy_mcsafe_64.o +ifndef CONFIG_PPC_QUEUED_SPINLOCKS obj64-$(CONFIG_SMP) += locks.o +endif + obj64-$(CONFIG_ALTIVEC) += vmx-helper.o obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o \ test_emulate_step_exec_instr.o diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index fde943d180e0..fb0a814d4395 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -12,6 +12,7 @@ #include +#ifndef queued_spin_is_locked /** * queued_spin_is_locked - is the spinlock locked? * @lock: Pointer to queued spinlock structure @@ -25,6 +26,7 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock) */ return atomic_read(&lock->val); } +#endif /** * queued_spin_value_unlocked - is the spinlock structure unlocked? -- cgit v1.2.3 From 20c0e8269e9d515e677670902c7e1cc0209d6ad9 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 24 Jul 2020 23:14:21 +1000 Subject: powerpc/pseries: Implement paravirt qspinlocks for SPLPAR This implements the generic paravirt qspinlocks using H_PROD and H_CONFER to kick and wait. This uses an un-directed yield to any CPU rather than the directed yield to a pre-empted lock holder that paravirtualised simple spinlocks use, that requires no kick hcall. This is something that could be investigated and improved in future. Performance results can be found in the commit which added queued spinlocks. 
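The division of labour between the two hcalls can be sketched as follows (userspace illustration only: the printf calls stand in for plpar_hcall_norets(), and the queueing machinery around them is omitted). A waiter that has spun for long enough confers the rest of its time slice to any other vCPU instead of burning it, and the unlocker prods the vCPU of the next waiter so it becomes runnable promptly:

#include <stdio.h>

static void yield_to_any(void)
{
	/* models plpar_hcall_norets(H_CONFER, -1, 0) */
	printf("H_CONFER(-1, 0): give the remaining time slice to any vCPU\n");
}

static void prod_cpu(int cpu)
{
	/* models plpar_hcall_norets(H_PROD, hwcpu) */
	printf("H_PROD(cpu %d): make that vCPU runnable again\n", cpu);
}

int main(void)
{
	yield_to_any();		/* what a queued waiter does instead of spinning */
	prod_cpu(3);		/* what the unlocker does for the next waiter */
	return 0;
}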
Signed-off-by: Nicholas Piggin Acked-by: Peter Zijlstra (Intel) Acked-by: Waiman Long Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724131423.1362108-5-npiggin@gmail.com --- arch/powerpc/include/asm/paravirt.h | 28 ++++++++++++ arch/powerpc/include/asm/qspinlock.h | 66 +++++++++++++++++++++++++++ arch/powerpc/include/asm/qspinlock_paravirt.h | 7 +++ arch/powerpc/include/asm/spinlock.h | 4 ++ arch/powerpc/platforms/pseries/Kconfig | 9 +++- arch/powerpc/platforms/pseries/setup.c | 4 +- include/asm-generic/qspinlock.h | 2 + 7 files changed, 118 insertions(+), 2 deletions(-) create mode 100644 arch/powerpc/include/asm/qspinlock_paravirt.h diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h index 339e8533464b..9362c94fe3aa 100644 --- a/arch/powerpc/include/asm/paravirt.h +++ b/arch/powerpc/include/asm/paravirt.h @@ -28,6 +28,16 @@ static inline void yield_to_preempted(int cpu, u32 yield_count) { plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count); } + +static inline void prod_cpu(int cpu) +{ + plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu)); +} + +static inline void yield_to_any(void) +{ + plpar_hcall_norets(H_CONFER, -1, 0); +} #else static inline bool is_shared_processor(void) { @@ -44,6 +54,19 @@ static inline void yield_to_preempted(int cpu, u32 yield_count) { ___bad_yield_to_preempted(); /* This would be a bug */ } + +extern void ___bad_yield_to_any(void); +static inline void yield_to_any(void) +{ + ___bad_yield_to_any(); /* This would be a bug */ +} + +extern void ___bad_prod_cpu(void); +static inline void prod_cpu(int cpu) +{ + ___bad_prod_cpu(); /* This would be a bug */ +} + #endif #define vcpu_is_preempted vcpu_is_preempted @@ -56,4 +79,9 @@ static inline bool vcpu_is_preempted(int cpu) return false; } +static inline bool pv_is_native_spin_unlock(void) +{ + return !is_shared_processor(); +} + #endif /* _ASM_POWERPC_PARAVIRT_H */ diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h index c49e33e24edd..f5066f00a08c 100644 --- a/arch/powerpc/include/asm/qspinlock.h +++ b/arch/powerpc/include/asm/qspinlock.h @@ -3,9 +3,47 @@ #define _ASM_POWERPC_QSPINLOCK_H #include +#include #define _Q_PENDING_LOOPS (1 << 9) /* not tuned */ +#ifdef CONFIG_PARAVIRT_SPINLOCKS +extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +extern void __pv_queued_spin_unlock(struct qspinlock *lock); + +static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) +{ + if (!is_shared_processor()) + native_queued_spin_lock_slowpath(lock, val); + else + __pv_queued_spin_lock_slowpath(lock, val); +} + +#define queued_spin_unlock queued_spin_unlock +static inline void queued_spin_unlock(struct qspinlock *lock) +{ + if (!is_shared_processor()) + smp_store_release(&lock->locked, 0); + else + __pv_queued_spin_unlock(lock); +} + +#else +extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +#endif + +static __always_inline void queued_spin_lock(struct qspinlock *lock) +{ + u32 val = 0; + + if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL))) + return; + + queued_spin_lock_slowpath(lock, val); +} +#define queued_spin_lock queued_spin_lock + #define smp_mb__after_spinlock() smp_mb() static __always_inline int queued_spin_is_locked(struct qspinlock *lock) @@ -20,6 +58,34 @@ static __always_inline int queued_spin_is_locked(struct qspinlock 
*lock) } #define queued_spin_is_locked queued_spin_is_locked +#ifdef CONFIG_PARAVIRT_SPINLOCKS +#define SPIN_THRESHOLD (1<<15) /* not tuned */ + +static __always_inline void pv_wait(u8 *ptr, u8 val) +{ + if (*ptr != val) + return; + yield_to_any(); + /* + * We could pass in a CPU here if waiting in the queue and yield to + * the previous CPU in the queue. + */ +} + +static __always_inline void pv_kick(int cpu) +{ + prod_cpu(cpu); +} + +extern void __pv_init_lock_hash(void); + +static inline void pv_spinlocks_init(void) +{ + __pv_init_lock_hash(); +} + +#endif + #include #endif /* _ASM_POWERPC_QSPINLOCK_H */ diff --git a/arch/powerpc/include/asm/qspinlock_paravirt.h b/arch/powerpc/include/asm/qspinlock_paravirt.h new file mode 100644 index 000000000000..6b60e7736a47 --- /dev/null +++ b/arch/powerpc/include/asm/qspinlock_paravirt.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _ASM_POWERPC_QSPINLOCK_PARAVIRT_H +#define _ASM_POWERPC_QSPINLOCK_PARAVIRT_H + +EXPORT_SYMBOL(__pv_queued_spin_unlock); + +#endif /* _ASM_POWERPC_QSPINLOCK_PARAVIRT_H */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 434615f1d761..6ec72282888d 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -10,5 +10,9 @@ #include #endif +#ifndef CONFIG_PARAVIRT_SPINLOCKS +static inline void pv_spinlocks_init(void) { } +#endif + #endif /* __KERNEL__ */ #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 24c18362e5ea..5e037df2a3a1 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -25,15 +25,22 @@ config PPC_PSERIES select SWIOTLB default y +config PARAVIRT_SPINLOCKS + bool + config PPC_SPLPAR - depends on PPC_PSERIES bool "Support for shared-processor logical partitions" + depends on PPC_PSERIES + select PARAVIRT_SPINLOCKS if PPC_QUEUED_SPINLOCKS + default y help Enabling this option will make the kernel run more efficiently on logically-partitioned pSeries systems which use shared processors, that is, which share physical processors between two or more partitions. + Say Y if you are unsure. 
+ config DTL bool "Dispatch Trace Log" depends on PPC_SPLPAR && DEBUG_FS diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index ba34eb23e8f5..e29c9bf0a3b9 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -773,8 +773,10 @@ static void __init pSeries_setup_arch(void) if (firmware_has_feature(FW_FEATURE_LPAR)) { vpa_init(boot_cpuid); - if (lppaca_shared_proc(get_lppaca())) + if (lppaca_shared_proc(get_lppaca())) { static_branch_enable(&shared_processor); + pv_spinlocks_init(); + } ppc_md.power_save = pseries_lpar_idle; ppc_md.enable_pmcs = pseries_lpar_enable_pmcs; diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index fb0a814d4395..38ca14e79a86 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -69,6 +69,7 @@ static __always_inline int queued_spin_trylock(struct qspinlock *lock) extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +#ifndef queued_spin_lock /** * queued_spin_lock - acquire a queued spinlock * @lock: Pointer to queued spinlock structure @@ -82,6 +83,7 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock) queued_spin_lock_slowpath(lock, val); } +#endif #ifndef queued_spin_unlock /** -- cgit v1.2.3 From 2f6560e652dfdbdb59df28b45a3458bf36d3c580 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 24 Jul 2020 23:14:22 +1000 Subject: powerpc/qspinlock: Optimised atomic_try_cmpxchg_lock() that adds the lock hint This brings the behaviour of the uncontended fast path back to roughly equivalent to simple spinlocks -- a single atomic op with lock hint. Signed-off-by: Nicholas Piggin Acked-by: Waiman Long Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724131423.1362108-6-npiggin@gmail.com --- arch/powerpc/include/asm/atomic.h | 28 ++++++++++++++++++++++++++++ arch/powerpc/include/asm/qspinlock.h | 2 +- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 498785ffc25f..f6a3d145ffb7 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -193,6 +193,34 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new)) +/* + * Don't want to override the generic atomic_try_cmpxchg_acquire, because + * we add a lock hint to the lwarx, which may not be wanted for the + * _acquire case (and is not used by the other _acquire variants so it + * would be a surprise). + */ +static __always_inline bool +atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new) +{ + int r, o = *old; + + __asm__ __volatile__ ( +"1:\t" PPC_LWARX(%0,0,%2,1) " # atomic_try_cmpxchg_acquire \n" +" cmpw 0,%0,%3 \n" +" bne- 2f \n" +" stwcx. 
%4,0,%2 \n" +" bne- 1b \n" +"\t" PPC_ACQUIRE_BARRIER " \n" +"2: \n" + : "=&r" (r), "+m" (v->counter) + : "r" (&v->counter), "r" (o), "r" (new) + : "cr0", "memory"); + + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} + /** * atomic_fetch_add_unless - add unless the number is a given value * @v: pointer of type atomic_t diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h index f5066f00a08c..b752d34517b3 100644 --- a/arch/powerpc/include/asm/qspinlock.h +++ b/arch/powerpc/include/asm/qspinlock.h @@ -37,7 +37,7 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock) { u32 val = 0; - if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL))) + if (likely(atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL))) return; queued_spin_lock_slowpath(lock, val); -- cgit v1.2.3 From 49a7d46a06c30c7beabbf9d1a8ea1de0f9e4fdfe Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 24 Jul 2020 23:14:23 +1000 Subject: powerpc: Implement smp_cond_load_relaxed() This implements smp_cond_load_relaxed() with the slowpath busy loop using the preferred SMT priority pattern. Signed-off-by: Nicholas Piggin Acked-by: Waiman Long [mpe: Make it 64-bit only to fix build errors on 32-bit] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724131423.1362108-7-npiggin@gmail.com --- arch/powerpc/include/asm/barrier.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index 35c1b8f3aa68..f53c42380832 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -80,6 +80,22 @@ do { \ ___p1; \ }) +#ifdef CONFIG_PPC64 +#define smp_cond_load_relaxed(ptr, cond_expr) ({ \ + typeof(ptr) __PTR = (ptr); \ + __unqual_scalar_typeof(*ptr) VAL; \ + VAL = READ_ONCE(*__PTR); \ + if (unlikely(!(cond_expr))) { \ + spin_begin(); \ + do { \ + VAL = READ_ONCE(*__PTR); \ + } while (!(cond_expr)); \ + spin_end(); \ + } \ + (typeof(*ptr))VAL; \ +}) +#endif + #ifdef CONFIG_PPC_BOOK3S_64 #define NOSPEC_BARRIER_SLOT nop #elif defined(CONFIG_PPC_FSL_BOOK3E) -- cgit v1.2.3 From faedc380129501bdd7f669bf14e9c7ee3e7a2feb Mon Sep 17 00:00:00 2001 From: Bill Wendling Date: Fri, 24 Jul 2020 15:49:01 -0700 Subject: powerpc/64s: allow for clang's objdump differences Clang's objdump emits slightly different output from GNU's objdump, causing a list of warnings to be emitted during relocatable builds. E.g., clang's objdump emits this: c000000000000004: 2c 00 00 48 b 0xc000000000000030 ... c000000000005c6c: 10 00 82 40 bf 2, 0xc000000000005c7c while GNU objdump emits: c000000000000004: 2c 00 00 48 b c000000000000030 <__start+0x30> ... c000000000005c6c: 10 00 82 40 bne c000000000005c7c Adjust llvm-objdump's output to remove the extraneous '0x' and convert 'bf' and 'bt' to 'bne' and 'beq' resp. to more closely match GNU objdump's output. Note that clang's objdump doesn't yet output the relocation symbols on PPC. 
Signed-off-by: Bill Wendling Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/191c67db31264b69cf6b566fd69851beb3dd0abb.1595630874.git.morbo@google.com --- arch/powerpc/tools/unrel_branch_check.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/tools/unrel_branch_check.sh b/arch/powerpc/tools/unrel_branch_check.sh index 77114755dc6f..6e6a30aea3ed 100755 --- a/arch/powerpc/tools/unrel_branch_check.sh +++ b/arch/powerpc/tools/unrel_branch_check.sh @@ -31,7 +31,10 @@ grep -e "^c[0-9a-f]*:[[:space:]]*\([0-9a-f][0-9a-f][[:space:]]\)\{4\}[[:space:]] grep -v '\<__start_initialization_multiplatform>' | grep -v -e 'b.\?.\?ctr' | grep -v -e 'b.\?.\?lr' | -sed 's/://' | +sed -e 's/\bbt.\?[[:space:]]*[[:digit:]][[:digit:]]*,/beq/' \ + -e 's/\bbf.\?[[:space:]]*[[:digit:]][[:digit:]]*,/bne/' \ + -e 's/[[:space:]]0x/ /' \ + -e 's/://' | awk '{ print $1 ":" $6 ":0x" $7 ":" $8 " "}' ) -- cgit v1.2.3 From 19a551b254e6c308348a46a65332aa03c01767ed Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Sat, 25 Jul 2020 17:19:49 +0800 Subject: powerpc/papr_scm: Make some symbols static The sparse tool complains as follows: arch/powerpc/platforms/pseries/papr_scm.c:97:1: warning: symbol 'papr_nd_regions' was not declared. Should it be static? arch/powerpc/platforms/pseries/papr_scm.c:98:1: warning: symbol 'papr_ndr_lock' was not declared. Should it be static? Those variables are not used outside of papr_scm.c, so this commit marks them static. Fixes: 85343a8da2d9 ("powerpc/papr/scm: Add bad memory ranges to nvdimm bad ranges") Reported-by: Hulk Robot Signed-off-by: Wei Yongjun Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200725091949.75234-1-weiyongjun1@huawei.com --- arch/powerpc/platforms/pseries/papr_scm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index 8fd441d32487..3d1235a76ba9 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -94,8 +94,8 @@ struct papr_scm_priv { u64 health_bitmap; }; -LIST_HEAD(papr_nd_regions); -DEFINE_MUTEX(papr_ndr_lock); +static LIST_HEAD(papr_nd_regions); +static DEFINE_MUTEX(papr_ndr_lock); static int drc_pmem_bind(struct papr_scm_priv *p) { -- cgit v1.2.3 From ccc8fcf72a6953fbfd6998999d622295f522b952 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 29 Jun 2020 11:15:20 +0000 Subject: powerpc/lib: Prepare code-patching for modules allocated outside vmalloc space Use is_vmalloc_or_module_addr() instead of is_vmalloc_addr() Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/7d884db0e5a6f521331639d8c0f13e520d5a4fef.1593428200.git.christophe.leroy@csgroup.eu --- arch/powerpc/lib/code-patching.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index 0a051dfeb177..8c3934ea6220 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -93,7 +93,7 @@ static int map_patch_area(void *addr, unsigned long text_poke_addr) unsigned long pfn; int err; - if (is_vmalloc_addr(addr)) + if (is_vmalloc_or_module_addr(addr)) pfn = vmalloc_to_pfn(addr); else pfn = __pa_symbol(addr) >> PAGE_SHIFT; -- cgit v1.2.3 From 7fbc22ce29931630da200cfc90fe5a454f54a794 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 29 Jun 2020 11:15:21 +0000 Subject: powerpc: Use MODULES_VADDR if defined In order to allow 
allocation of modules outside of vmalloc space, use MODULES_VADDR and MODULES_END when MODULES_VADDR is defined. Redefine module_alloc() when MODULES_VADDR defined. Unmap corresponding KASAN shadow memory. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/7ecf5fff1eef67d450e73fc412b6ec3818483d75.1593428200.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/module.c | 11 +++++++++++ arch/powerpc/mm/kasan/kasan_init_32.c | 6 ++++++ 2 files changed, 17 insertions(+) diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index df649acb5631..a211b0253cdb 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -86,3 +86,14 @@ int module_finalize(const Elf_Ehdr *hdr, return 0; } + +#ifdef MODULES_VADDR +void *module_alloc(unsigned long size) +{ + BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); + + return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, GFP_KERNEL, + PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, NUMA_NO_NODE, + __builtin_return_address(0)); +} +#endif diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c index 019b0c0bbbf3..fb294046e00e 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/kasan_init_32.c @@ -115,6 +115,12 @@ static void __init kasan_unmap_early_shadow_vmalloc(void) unsigned long k_end = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_END); kasan_update_early_region(k_start, k_end, __pte(0)); + +#ifdef MODULES_VADDR + k_start = (unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR); + k_end = (unsigned long)kasan_mem_to_shadow((void *)MODULES_END); + kasan_update_early_region(k_start, k_end, __pte(0)); +#endif } void __init kasan_mmu_init(void) -- cgit v1.2.3 From c496433197154144c310a17939736bc5c155914d Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 29 Jun 2020 11:15:22 +0000 Subject: powerpc/32s: Only leave NX unset on segments used for modules Instead of leaving NX unset on all segments above the start of vmalloc space, only leave NX unset on segments used for modules. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/7172c0f5253419315e434a1816ee3d6ed6505bc0.1593428200.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/book3s32/mmu.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index 03b6ba54460e..c0162911f6cb 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -187,6 +187,17 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) return __mmu_mapin_ram(border, top); } +static bool is_module_segment(unsigned long addr) +{ + if (!IS_ENABLED(CONFIG_MODULES)) + return false; + if (addr < ALIGN_DOWN(VMALLOC_START, SZ_256M)) + return false; + if (addr >= ALIGN(VMALLOC_END, SZ_256M)) + return false; + return true; +} + void mmu_mark_initmem_nx(void) { int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 
8 : 4; @@ -223,9 +234,9 @@ void mmu_mark_initmem_nx(void) for (i = TASK_SIZE >> 28; i < 16; i++) { /* Do not set NX on VM space for modules */ - if (IS_ENABLED(CONFIG_MODULES) && - (VMALLOC_START & 0xf0000000) == i << 28) - break; + if (is_module_segment(i << 28)) + continue; + mtsrin(mfsrin(i << 28) | 0x10000000, i << 28); } } -- cgit v1.2.3 From b6be1bb7f7216b9e9f33f57abe6e3290c0e66bd4 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 29 Jun 2020 11:15:23 +0000 Subject: powerpc/32: Set user/kernel boundary at TASK_SIZE instead of PAGE_OFFSET User space stops at TASK_SIZE. At the moment, kernel space starts at PAGE_OFFSET. In order to use space between TASK_SIZE and PAGE_OFFSET for modules, make TASK_SIZE the limit between user and kernel space. Note that fault.c already considers TASK_SIZE as the boundary between user and kernel space. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/b38b52cd8dabbb56fbd6f9219d6f3cdccbb43b44.1593428200.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/page.h | 4 +++- arch/powerpc/mm/ptdump/ptdump.c | 8 ++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index a63fe6f3a0ff..254687258f42 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -255,8 +255,10 @@ static inline bool pfn_valid(unsigned long pfn) */ #ifdef CONFIG_PPC_BOOK3E_64 #define is_kernel_addr(x) ((x) >= 0x8000000000000000ul) -#else +#elif defined(CONFIG_PPC_BOOK3S_64) #define is_kernel_addr(x) ((x) >= PAGE_OFFSET) +#else +#define is_kernel_addr(x) ((x) >= TASK_SIZE) #endif #ifndef CONFIG_PPC_BOOK3S_64 diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c index de6e05ef871c..9d942136c7be 100644 --- a/arch/powerpc/mm/ptdump/ptdump.c +++ b/arch/powerpc/mm/ptdump/ptdump.c @@ -348,7 +348,11 @@ static void populate_markers(void) { int i = 0; +#ifdef CONFIG_PPC64 address_markers[i++].start_address = PAGE_OFFSET; +#else + address_markers[i++].start_address = TASK_SIZE; +#endif address_markers[i++].start_address = VMALLOC_START; address_markers[i++].start_address = VMALLOC_END; #ifdef CONFIG_PPC64 @@ -385,7 +389,7 @@ static int ptdump_show(struct seq_file *m, void *v) struct pg_state st = { .seq = m, .marker = address_markers, - .start_address = PAGE_OFFSET, + .start_address = IS_ENABLED(CONFIG_PPC64) ? PAGE_OFFSET : TASK_SIZE, }; #ifdef CONFIG_PPC64 @@ -429,7 +433,7 @@ void ptdump_check_wx(void) .seq = NULL, .marker = address_markers, .check_wx = true, - .start_address = PAGE_OFFSET, + .start_address = IS_ENABLED(CONFIG_PPC64) ? PAGE_OFFSET : TASK_SIZE, }; #ifdef CONFIG_PPC64 -- cgit v1.2.3 From f1a1f7a15eb0e13b84791ff2738b84e414501718 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 29 Jun 2020 11:15:24 +0000 Subject: powerpc/32s: Kernel space starts at TASK_SIZE Kernel space starts at TASK_SIZE. Select kernel page table when address is over TASK_SIZE. 
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/893425e32cd0a003539573b2d115e0ffa98bc26c.1593428200.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/head_32.S | 12 ++++++------ arch/powerpc/mm/book3s32/hash_low.S | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 705c042309d8..bbef6ce8322b 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -474,7 +474,7 @@ InstructionTLBMiss: /* Get PTE (linux-style) and check access */ mfspr r3,SPRN_IMISS #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) - lis r1,PAGE_OFFSET@h /* check if kernel address */ + lis r1, TASK_SIZE@h /* check if kernel address */ cmplw 0,r1,r3 #endif mfspr r2, SPRN_SPRG_PGDIR @@ -484,7 +484,7 @@ InstructionTLBMiss: li r1,_PAGE_PRESENT | _PAGE_EXEC #endif #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) - bge- 112f + bgt- 112f lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ #endif @@ -541,7 +541,7 @@ DataLoadTLBMiss: */ /* Get PTE (linux-style) and check access */ mfspr r3,SPRN_DMISS - lis r1,PAGE_OFFSET@h /* check if kernel address */ + lis r1, TASK_SIZE@h /* check if kernel address */ cmplw 0,r1,r3 mfspr r2, SPRN_SPRG_PGDIR #ifdef CONFIG_SWAP @@ -549,7 +549,7 @@ DataLoadTLBMiss: #else li r1, _PAGE_PRESENT #endif - bge- 112f + bgt- 112f lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ @@ -621,7 +621,7 @@ DataStoreTLBMiss: */ /* Get PTE (linux-style) and check access */ mfspr r3,SPRN_DMISS - lis r1,PAGE_OFFSET@h /* check if kernel address */ + lis r1, TASK_SIZE@h /* check if kernel address */ cmplw 0,r1,r3 mfspr r2, SPRN_SPRG_PGDIR #ifdef CONFIG_SWAP @@ -629,7 +629,7 @@ DataStoreTLBMiss: #else li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT #endif - bge- 112f + bgt- 112f lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S index 923ad8f374eb..1690d369688b 100644 --- a/arch/powerpc/mm/book3s32/hash_low.S +++ b/arch/powerpc/mm/book3s32/hash_low.S @@ -62,7 +62,7 @@ _GLOBAL(hash_page) isync #endif /* Get PTE (linux-style) and check access */ - lis r0,KERNELBASE@h /* check if kernel address */ + lis r0, TASK_SIZE@h /* check if kernel address */ cmplw 0,r4,r0 ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */ mfspr r5, SPRN_SPRG_PGDIR /* phys page-table root */ -- cgit v1.2.3 From 6ca055322da8fe25ff9ac50db6f3b7b59b6f961c Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 29 Jun 2020 11:15:26 +0000 Subject: powerpc/32s: Use dedicated segment for modules with STRICT_KERNEL_RWX When STRICT_KERNEL_RWX is set, we want to set NX bit on vmalloc segments. But modules require exec. Use a dedicated segment for modules. There is not much space above kernel, and we don't waste vmalloc space to do alignment. Therefore, we take the segment before PAGE_OFFSET for modules. 
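[Editorial aside, not part of the patch: a worked example of the resulting layout. This is plain userspace C and assumes the usual book3s/32 CONFIG_PAGE_OFFSET of 0xc0000000, which the patch itself does not state; with the MODULES_END/MODULES_VADDR definitions added below, the module area is exactly the 256MB segment just under the kernel, matching the new 0xb0000000 TASK_SIZE default.]

#include <stdio.h>

#define SZ_256M			0x10000000UL
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))	/* simplified; 'a' must be a power of two */

int main(void)
{
	unsigned long page_offset = 0xc0000000UL;	/* assumed CONFIG_PAGE_OFFSET */
	unsigned long modules_end = ALIGN_DOWN(page_offset, SZ_256M);
	unsigned long modules_vaddr = modules_end - SZ_256M;

	/* prints: modules: 0xb0000000 - 0xc0000000 */
	printf("modules: 0x%08lx - 0x%08lx\n", modules_vaddr, modules_end);
	return 0;
}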
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/eb8faba9148b6cf17c696ba776b4e8ee2f6313bf.1593428200.git.christophe.leroy@csgroup.eu --- arch/powerpc/Kconfig | 1 + arch/powerpc/include/asm/book3s/32/pgtable.h | 15 +++++---------- arch/powerpc/mm/ptdump/ptdump.c | 8 ++++++++ 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index a751edacf4bc..8c7656cc10eb 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -1213,6 +1213,7 @@ config TASK_SIZE_BOOL config TASK_SIZE hex "Size of user task space" if TASK_SIZE_BOOL default "0x80000000" if PPC_8xx + default "0xb0000000" if PPC_BOOK3S_32 && STRICT_KERNEL_RWX default "0xc0000000" endmenu diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 224912432821..36443cda8dcf 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -184,17 +184,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); */ #define VMALLOC_OFFSET (0x1000000) /* 16M */ -/* - * With CONFIG_STRICT_KERNEL_RWX, kernel segments are set NX. But when modules - * are used, NX cannot be set on VMALLOC space. So vmalloc VM space and linear - * memory shall not share segments. - */ -#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_MODULES) -#define VMALLOC_START ((ALIGN((long)high_memory, 256L << 20) + VMALLOC_OFFSET) & \ - ~(VMALLOC_OFFSET - 1)) -#else #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) -#endif #ifdef CONFIG_KASAN_VMALLOC #define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) @@ -202,6 +192,11 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); #define VMALLOC_END ioremap_bot #endif +#ifdef CONFIG_STRICT_KERNEL_RWX +#define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M) +#define MODULES_VADDR (MODULES_END - SZ_256M) +#endif + #ifndef __ASSEMBLY__ #include #include diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c index 9d942136c7be..b2ed1ca4f254 100644 --- a/arch/powerpc/mm/ptdump/ptdump.c +++ b/arch/powerpc/mm/ptdump/ptdump.c @@ -74,6 +74,10 @@ struct addr_marker { static struct addr_marker address_markers[] = { { 0, "Start of kernel VM" }, +#ifdef MODULES_VADDR + { 0, "modules start" }, + { 0, "modules end" }, +#endif { 0, "vmalloc() Area" }, { 0, "vmalloc() End" }, #ifdef CONFIG_PPC64 @@ -352,6 +356,10 @@ static void populate_markers(void) address_markers[i++].start_address = PAGE_OFFSET; #else address_markers[i++].start_address = TASK_SIZE; +#endif +#ifdef MODULES_VADDR + address_markers[i++].start_address = MODULES_VADDR; + address_markers[i++].start_address = MODULES_END; #endif address_markers[i++].start_address = VMALLOC_START; address_markers[i++].start_address = VMALLOC_END; -- cgit v1.2.3 From 846feeace51bce13f5c645d5bf162455b89841fd Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 29 Jun 2020 11:17:18 +0000 Subject: powerpc/ptdump: Refactor update of st->last_pa st->last_pa is always updated in note_page() so it can be done outside the if/elseif/else block. 
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/610d6b1a60ad0bedef865a90153c1110cfaa507e.1593429426.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/ptdump/ptdump.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c index b2ed1ca4f254..20a039867934 100644 --- a/arch/powerpc/mm/ptdump/ptdump.c +++ b/arch/powerpc/mm/ptdump/ptdump.c @@ -211,7 +211,6 @@ static void note_page(struct pg_state *st, unsigned long addr, st->current_flags = flag; st->start_address = addr; st->start_pa = pa; - st->last_pa = pa; st->page_size = page_size; pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); /* @@ -251,13 +250,11 @@ static void note_page(struct pg_state *st, unsigned long addr, } st->start_address = addr; st->start_pa = pa; - st->last_pa = pa; st->page_size = page_size; st->current_flags = flag; st->level = level; - } else { - st->last_pa = pa; } + st->last_pa = pa; } static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) -- cgit v1.2.3 From e54e30bca40233139290aecfce932dea9b996516 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 29 Jun 2020 11:17:19 +0000 Subject: powerpc/ptdump: Refactor update of pg_state In note_page(), the pg_state is updated the same way in two places. Add note_page_update_state() to do it. Also include the display of boundary markers there as it is missing "no level" leg, leading to a mismatch when the first two markers are at the same address and the first displayed area uses that address. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/a284a809f01c705bbaab303b06fda216f147a99a.1593429426.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/ptdump/ptdump.c | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c index 20a039867934..c911cd757f7d 100644 --- a/arch/powerpc/mm/ptdump/ptdump.c +++ b/arch/powerpc/mm/ptdump/ptdump.c @@ -199,6 +199,24 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr) st->wx_pages += (addr - st->start_address) / PAGE_SIZE; } +static void note_page_update_state(struct pg_state *st, unsigned long addr, + unsigned int level, u64 val, unsigned long page_size) +{ + u64 flag = val & pg_level[level].mask; + u64 pa = val & PTE_RPN_MASK; + + st->level = level; + st->current_flags = flag; + st->start_address = addr; + st->start_pa = pa; + st->page_size = page_size; + + while (addr >= st->marker[1].start_address) { + st->marker++; + pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); + } +} + static void note_page(struct pg_state *st, unsigned long addr, unsigned int level, u64 val, unsigned long page_size) { @@ -207,12 +225,8 @@ static void note_page(struct pg_state *st, unsigned long addr, /* At first no level is set */ if (!st->level) { - st->level = level; - st->current_flags = flag; - st->start_address = addr; - st->start_pa = pa; - st->page_size = page_size; pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); + note_page_update_state(st, addr, level, val, page_size); /* * Dump the section of virtual memory when: * - the PTE flags from one entry to the next differs. 
@@ -244,15 +258,7 @@ static void note_page(struct pg_state *st, unsigned long addr, * Address indicates we have passed the end of the * current section of virtual memory */ - while (addr >= st->marker[1].start_address) { - st->marker++; - pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); - } - st->start_address = addr; - st->start_pa = pa; - st->page_size = page_size; - st->current_flags = flag; - st->level = level; + note_page_update_state(st, addr, level, val, page_size); } st->last_pa = pa; } -- cgit v1.2.3 From e2802618970566277cf5cf5c99df66f21ee83766 Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Fri, 26 Apr 2019 19:36:30 +0800 Subject: powerpc/lib: remove memcpy_flushcache redundant return Align it with other architectures and none of the callers has been interested its return Signed-off-by: Li RongQing Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1556278590-14727-1-git-send-email-lirongqing@baidu.com --- arch/powerpc/include/asm/string.h | 2 +- arch/powerpc/lib/pmem.c | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h index b72692702f35..283552cd0e58 100644 --- a/arch/powerpc/include/asm/string.h +++ b/arch/powerpc/include/asm/string.h @@ -28,7 +28,7 @@ extern void * memcpy(void *,const void *,__kernel_size_t); extern void * memmove(void *,const void *,__kernel_size_t); extern int memcmp(const void *,const void *,__kernel_size_t); extern void * memchr(const void *,int,__kernel_size_t); -extern void * memcpy_flushcache(void *,const void *,__kernel_size_t); +void memcpy_flushcache(void *dest, const void *src, size_t size); void *__memset(void *s, int c, __kernel_size_t count); void *__memcpy(void *to, const void *from, __kernel_size_t n); diff --git a/arch/powerpc/lib/pmem.c b/arch/powerpc/lib/pmem.c index 21210fa676e5..1550e0d2513a 100644 --- a/arch/powerpc/lib/pmem.c +++ b/arch/powerpc/lib/pmem.c @@ -76,14 +76,12 @@ long __copy_from_user_flushcache(void *dest, const void __user *src, return copied; } -void *memcpy_flushcache(void *dest, const void *src, size_t size) +void memcpy_flushcache(void *dest, const void *src, size_t size) { unsigned long start = (unsigned long) dest; memcpy(dest, src, size); clean_pmem_range(start, start + size); - - return dest; } EXPORT_SYMBOL(memcpy_flushcache); -- cgit v1.2.3 From 10a4a016d6a882ba7601159b0f719330b102c41b Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 25 Jul 2020 17:38:01 -0700 Subject: powerpc/book3s/mmu-hash.h: delete duplicated word Drop the repeated word "below". Signed-off-by: Randy Dunlap Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200726003809.20454-2-rdunlap@infradead.org --- arch/powerpc/include/asm/book3s/64/mmu-hash.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h index 58fcc959f9d5..93d18da5e7ec 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h @@ -793,7 +793,7 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea, } /* - * For kernel space, we use context ids as below + * For kernel space, we use context ids as * below. Range is 512TB per context. 
* * 0x00001 - [ 0xc000000000000000 - 0xc001ffffffffffff] -- cgit v1.2.3 From 92be1fca08eabe8ab083b1dfccd3e932b4fb6f1a Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 25 Jul 2020 17:38:02 -0700 Subject: powerpc/book3s/radix-4k.h: delete duplicated word Drop the repeated word "per". Signed-off-by: Randy Dunlap Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200726003809.20454-3-rdunlap@infradead.org --- arch/powerpc/include/asm/book3s/64/radix-4k.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/book3s/64/radix-4k.h b/arch/powerpc/include/asm/book3s/64/radix-4k.h index d5f5ab73dc7f..035ceecd6d67 100644 --- a/arch/powerpc/include/asm/book3s/64/radix-4k.h +++ b/arch/powerpc/include/asm/book3s/64/radix-4k.h @@ -11,7 +11,7 @@ #define RADIX_PGD_INDEX_SIZE 13 // size: 8B << 13 = 64KB, maps 2^13 x 512GB = 4PB /* - * One fragment per per page + * One fragment per page */ #define RADIX_PTE_FRAG_SIZE_SHIFT (RADIX_PTE_INDEX_SIZE + 3) #define RADIX_PTE_FRAG_NR (PAGE_SIZE >> RADIX_PTE_FRAG_SIZE_SHIFT) -- cgit v1.2.3 From dc9bf323d6b8996d22c111add0ac8b0c895dcf52 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 25 Jul 2020 17:38:03 -0700 Subject: powerpc/cputime.h: delete duplicated word Drop the repeated word "use". Signed-off-by: Randy Dunlap Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200726003809.20454-4-rdunlap@infradead.org --- arch/powerpc/include/asm/cputime.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index 9335b93924b4..ed75d1c318e3 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h @@ -67,7 +67,7 @@ static inline void arch_vtime_task_switch(struct task_struct *prev) /* * account_cpu_user_entry/exit runs "unreconciled", so can't trace, - * can't use use get_paca() + * can't use get_paca() */ static notrace inline void account_cpu_user_entry(void) { -- cgit v1.2.3 From 8965aa4b684f022c4d0bc6429097ddb38a26eaef Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 25 Jul 2020 17:38:04 -0700 Subject: powerpc/epapr_hcalls.h: delete duplicated words Drop the repeated words "file" and "the". Signed-off-by: Randy Dunlap Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200726003809.20454-5-rdunlap@infradead.org --- arch/powerpc/include/asm/epapr_hcalls.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h index d3a7e36f1402..c99ba08a408d 100644 --- a/arch/powerpc/include/asm/epapr_hcalls.h +++ b/arch/powerpc/include/asm/epapr_hcalls.h @@ -37,7 +37,7 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -/* A "hypercall" is an "sc 1" instruction. This header file file provides C +/* A "hypercall" is an "sc 1" instruction. This header file provides C * wrapper functions for the ePAPR hypervisor interface. It is inteded * for use by Linux device drivers and other operating systems. * @@ -246,7 +246,7 @@ static inline unsigned int ev_int_get_mask(unsigned int interrupt, * ev_int_eoi - signal the end of interrupt processing * @interrupt: the interrupt number * - * This function signals the end of processing for the the specified + * This function signals the end of processing for the specified * interrupt, which must be the interrupt currently in service. By * definition, this is also the highest-priority interrupt. 
* -- cgit v1.2.3 From 028cc22d29959b501add32fc62660e5484c8092d Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 25 Jul 2020 17:38:05 -0700 Subject: powerpc/hw_breakpoint.h: delete duplicated word Drop the repeated word "the". Signed-off-by: Randy Dunlap Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200726003809.20454-6-rdunlap@infradead.org --- arch/powerpc/include/asm/hw_breakpoint.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index c89250b6ac34..db206a7f38e2 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -19,7 +19,7 @@ struct arch_hw_breakpoint { u16 hw_len; /* length programmed in hw */ }; -/* Note: Don't change the the first 6 bits below as they are in the same order +/* Note: Don't change the first 6 bits below as they are in the same order * as the dabr and dabrx. */ #define HW_BRK_TYPE_READ 0x01 -- cgit v1.2.3 From db10f5500004268b29e3c5bfd1e44ef53a1e25c9 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 25 Jul 2020 17:38:06 -0700 Subject: powerpc/ppc_asm.h: delete duplicated word Drop the repeated word "in". Signed-off-by: Randy Dunlap Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200726003809.20454-7-rdunlap@infradead.org --- arch/powerpc/include/asm/ppc_asm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 160f3bb77ea4..b4cc6608131c 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -776,7 +776,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) #define FIXUP_ENDIAN #else /* - * This version may be used in in HV or non-HV context. + * This version may be used in HV or non-HV context. * MSR[EE] must be disabled. */ #define FIXUP_ENDIAN \ -- cgit v1.2.3 From 850659392abc303d41c3f9217d45ab4fa79d201c Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 25 Jul 2020 17:38:07 -0700 Subject: powerpc/reg.h: delete duplicated word Drop the repeated word "a". Signed-off-by: Randy Dunlap Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200726003809.20454-8-rdunlap@infradead.org --- arch/powerpc/include/asm/reg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 97efbe2973fa..ae71027d3ca2 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1479,7 +1479,7 @@ static inline void update_power8_hid0(unsigned long hid0) { /* * The HID0 update on Power8 should at the very least be - * preceded by a a SYNC instruction followed by an ISYNC + * preceded by a SYNC instruction followed by an ISYNC * instruction */ asm volatile("sync; mtspr %0,%1; isync":: "i"(SPRN_HID0), "r"(hid0)); -- cgit v1.2.3 From 3b56ed4b461fd92b66f6ea44d81837e12878031f Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 25 Jul 2020 17:38:08 -0700 Subject: powerpc/smu.h: delete duplicated word Drop the repeated word "the". 
Signed-off-by: Randy Dunlap Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200726003809.20454-9-rdunlap@infradead.org --- arch/powerpc/include/asm/smu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/smu.h b/arch/powerpc/include/asm/smu.h index 8dff086c0cab..4b30a0205c93 100644 --- a/arch/powerpc/include/asm/smu.h +++ b/arch/powerpc/include/asm/smu.h @@ -108,7 +108,7 @@ /* * i2c commands * - * To issue an i2c command, first is to send a parameter block to the + * To issue an i2c command, first is to send a parameter block to * the SMU. This is a command of type 0x9a with 9 bytes of header * eventually followed by data for a write: * -- cgit v1.2.3 From 86052e407e8e1964c81965de25832258875a0e6d Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 25 Jul 2020 17:38:09 -0700 Subject: powerpc/powernv/pci.h: delete duplicated word Drop the repeated word "for". Signed-off-by: Randy Dunlap Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200726003809.20454-10-rdunlap@infradead.org --- arch/powerpc/platforms/powernv/pci.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index c8cc152bdf52..739a0b3b72e1 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -94,7 +94,7 @@ struct pnv_ioda_pe { */ bool dma_setup_done; - /* MSIs. MVE index is identical for for 32 and 64 bit MSI + /* MSIs. MVE index is identical for 32 and 64 bit MSI * and -1 if not supported. (It's actually identical to the * PE number) */ -- cgit v1.2.3 From 5f987caec521cbb00d4ba2dc641ac8074626b762 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Mon, 27 Jul 2020 13:44:36 +1000 Subject: powerpc/fadump: Fix build error with CONFIG_PRESERVE_FA_DUMP=y MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit skiroot_defconfig fails: arch/powerpc/kernel/fadump.c:48:17: error: ā€˜cpus_in_fadumpā€™ defined but not used 48 | static atomic_t cpus_in_fadump; Fix it by moving the definition into the #ifdef where it's used. Fixes: ba608c4fa12c ("powerpc/fadump: fix race between pstore write and fadump crash trigger") Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200727070341.595634-1-mpe@ellerman.id.au --- arch/powerpc/kernel/fadump.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 1858896d6809..10ebb4bf71ad 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -45,10 +45,12 @@ static struct fw_dump fw_dump; static void __init fadump_reserve_crash_area(u64 base); struct kobject *fadump_kobj; -static atomic_t cpus_in_fadump; #ifndef CONFIG_PRESERVE_FA_DUMP + +static atomic_t cpus_in_fadump; static DEFINE_MUTEX(fadump_mutex); + struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false }; #define RESERVED_RNGS_SZ 16384 /* 16K - 128 entries */ -- cgit v1.2.3 From 443359aebce0e17148251c0e316801fe69aa7d33 Mon Sep 17 00:00:00 2001 From: Athira Rajeev Date: Wed, 29 Jul 2020 00:16:54 -0400 Subject: powerpc/perf: Fix MMCRA_BHRB_DISABLE define for binutils < 2.28 Commit 9908c826d5ed ("powerpc/perf: Add Power10 PMU feature to DT CPU features") defines MMCRA_BHRB_DISABLE as `0x2000000000UL`. Binutils version less than 2.28 doesn't support UL suffix. 
arch/powerpc/kernel/cpu_setup_power.S: Assembler messages: arch/powerpc/kernel/cpu_setup_power.S:250: Error: found 'L', expected: ')' arch/powerpc/kernel/cpu_setup_power.S:250: Error: junk at end of line, first unrecognized character is `L' arch/powerpc/kernel/cpu_setup_power.S:250: Error: found 'L', expected: ')' arch/powerpc/kernel/cpu_setup_power.S:250: Error: found 'L', expected: ')' arch/powerpc/kernel/cpu_setup_power.S:250: Error: junk at end of line, first unrecognized character is `L' arch/powerpc/kernel/cpu_setup_power.S:250: Error: found 'L', expected: ')' arch/powerpc/kernel/cpu_setup_power.S:250: Error: found 'L', expected: ')' arch/powerpc/kernel/cpu_setup_power.S:250: Error: operand out of range (0x0000002000000000 is not between 0xffffffffffff8000 and 0x000000000000ffff) Fix this by wrapping it with the `_UL` macro. Fixes: 9908c826d5ed ("Add Power10 PMU feature to DT CPU features") Suggested-by: Michael Ellerman Signed-off-by: Athira Rajeev Reviewed-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1595996214-5833-1-git-send-email-atrajeev@linux.vnet.ibm.com --- arch/powerpc/include/asm/reg.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index ae71027d3ca2..41419f1fc00f 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -12,6 +12,7 @@ #ifdef __KERNEL__ #include +#include #include #include #include @@ -888,7 +889,7 @@ #define MMCRA_SLOT 0x07000000UL /* SLOT bits (37-39) */ #define MMCRA_SLOT_SHIFT 24 #define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */ -#define MMCRA_BHRB_DISABLE 0x2000000000UL // BHRB disable bit for ISA v3.1 +#define MMCRA_BHRB_DISABLE _UL(0x2000000000) // BHRB disable bit for ISA v3.1 #define POWER6_MMCRA_SDSYNC 0x0000080000000000ULL /* SDAR/SIAR synced */ #define POWER6_MMCRA_SIHV 0x0000040000000000ULL #define POWER6_MMCRA_SIPR 0x0000020000000000ULL -- cgit v1.2.3 From 1f0ce497433f8944045ee1baae218e31a0d295ee Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sun, 26 Jul 2020 13:51:53 +1000 Subject: powerpc: Inline doorbell sending functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These are only called in one place for a given platform, so inline them for performance. 
Signed-off-by: Nicholas Piggin Tested-by: CĆ©dric Le Goater [mpe: Fix build errors related to KVM] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200726035155.1424103-2-npiggin@gmail.com --- arch/powerpc/include/asm/dbell.h | 64 ++++++++++++++++++++++++++++++++++-- arch/powerpc/include/asm/kvm_booke.h | 2 ++ arch/powerpc/kernel/dbell.c | 55 ------------------------------- 3 files changed, 63 insertions(+), 58 deletions(-) diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h index 4ce6808deed3..3e9da22a2779 100644 --- a/arch/powerpc/include/asm/dbell.h +++ b/arch/powerpc/include/asm/dbell.h @@ -11,8 +11,10 @@ #include #include +#include #include #include +#include #define PPC_DBELL_MSG_BRDCAST (0x04000000) #define PPC_DBELL_TYPE(x) (((x) & 0xf) << (63-36)) @@ -87,9 +89,6 @@ static inline void ppc_msgsync(void) #endif /* CONFIG_PPC_BOOK3S */ -extern void doorbell_global_ipi(int cpu); -extern void doorbell_core_ipi(int cpu); -extern int doorbell_try_core_ipi(int cpu); extern void doorbell_exception(struct pt_regs *regs); static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag) @@ -100,4 +99,63 @@ static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag) _ppc_msgsnd(msg); } +#ifdef CONFIG_SMP + +/* + * Doorbells must only be used if CPU_FTR_DBELL is available. + * msgsnd is used in HV, and msgsndp is used in !HV. + * + * These should be used by platform code that is aware of restrictions. + * Other arch code should use ->cause_ipi. + * + * doorbell_global_ipi() sends a dbell to any target CPU. + * Must be used only by architectures that address msgsnd target + * by PIR/get_hard_smp_processor_id. + */ +static inline void doorbell_global_ipi(int cpu) +{ + u32 tag = get_hard_smp_processor_id(cpu); + + kvmppc_set_host_ipi(cpu); + /* Order previous accesses vs. msgsnd, which is treated as a store */ + ppc_msgsnd_sync(); + ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag); +} + +/* + * doorbell_core_ipi() sends a dbell to a target CPU in the same core. + * Must be used only by architectures that address msgsnd target + * by TIR/cpu_thread_in_core. + */ +static inline void doorbell_core_ipi(int cpu) +{ + u32 tag = cpu_thread_in_core(cpu); + + kvmppc_set_host_ipi(cpu); + /* Order previous accesses vs. msgsnd, which is treated as a store */ + ppc_msgsnd_sync(); + ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag); +} + +/* + * Attempt to cause a core doorbell if destination is on the same core. + * Returns 1 on success, 0 on failure. 
+ */ +static inline int doorbell_try_core_ipi(int cpu) +{ + int this_cpu = get_cpu(); + int ret = 0; + + if (cpumask_test_cpu(cpu, cpu_sibling_mask(this_cpu))) { + doorbell_core_ipi(cpu); + ret = 1; + } + + put_cpu(); + + return ret; +} + +#endif /* CONFIG_SMP */ + #endif /* _ASM_POWERPC_DBELL_H */ diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h index 310ba48d13f0..0c3401b2e19e 100644 --- a/arch/powerpc/include/asm/kvm_booke.h +++ b/arch/powerpc/include/asm/kvm_booke.h @@ -89,10 +89,12 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) return vcpu->arch.regs.nip; } +#ifdef CONFIG_BOOKE static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) { return vcpu->arch.fault_dear; } +#endif static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu) { diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c index f17ff1200eaa..52680cf07c9d 100644 --- a/arch/powerpc/kernel/dbell.c +++ b/arch/powerpc/kernel/dbell.c @@ -18,61 +18,6 @@ #ifdef CONFIG_SMP -/* - * Doorbells must only be used if CPU_FTR_DBELL is available. - * msgsnd is used in HV, and msgsndp is used in !HV. - * - * These should be used by platform code that is aware of restrictions. - * Other arch code should use ->cause_ipi. - * - * doorbell_global_ipi() sends a dbell to any target CPU. - * Must be used only by architectures that address msgsnd target - * by PIR/get_hard_smp_processor_id. - */ -void doorbell_global_ipi(int cpu) -{ - u32 tag = get_hard_smp_processor_id(cpu); - - kvmppc_set_host_ipi(cpu); - /* Order previous accesses vs. msgsnd, which is treated as a store */ - ppc_msgsnd_sync(); - ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag); -} - -/* - * doorbell_core_ipi() sends a dbell to a target CPU in the same core. - * Must be used only by architectures that address msgsnd target - * by TIR/cpu_thread_in_core. - */ -void doorbell_core_ipi(int cpu) -{ - u32 tag = cpu_thread_in_core(cpu); - - kvmppc_set_host_ipi(cpu); - /* Order previous accesses vs. msgsnd, which is treated as a store */ - ppc_msgsnd_sync(); - ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag); -} - -/* - * Attempt to cause a core doorbell if destination is on the same core. - * Returns 1 on success, 0 on failure. - */ -int doorbell_try_core_ipi(int cpu) -{ - int this_cpu = get_cpu(); - int ret = 0; - - if (cpumask_test_cpu(cpu, cpu_sibling_mask(this_cpu))) { - doorbell_core_ipi(cpu); - ret = 1; - } - - put_cpu(); - - return ret; -} - void doorbell_exception(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); -- cgit v1.2.3 From 5b06d1679f2fe874ef49ea11324cd893ec9e2da8 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sun, 26 Jul 2020 13:51:54 +1000 Subject: powerpc/pseries: Use doorbells even if XIVE is available MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit KVM supports msgsndp in guests by trapping and emulating the instruction, so it was decided to always use XIVE for IPIs if it is available. However on PowerVM systems, msgsndp can be used and gives better performance. On large systems, high XIVE interrupt rates can have sub-linear scaling, and using msgsndp can reduce the load on the interrupt controller. So switch to using core local doorbells even if XIVE is available. This reduces performance for KVM guests with an SMT topology by about 50% for ping-pong context switching between SMT vCPUs. An option vector (or dt-cpu-ftrs) could be defined to disable msgsndp to get KVM performance back. 
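[Editorial aside, not part of the patch: a stand-alone simulation of the IPI selection described above. Generic powerpc SMP code raises IPIs through smp_ops->cause_ipi(), which this patch points at a wrapper that tries an SMT-sibling doorbell first and only falls back to the interrupt controller (XICS or XIVE) otherwise. The *_stub names below are invented for the example and do not exist in the kernel.]

#include <stdbool.h>
#include <stdio.h>

static bool doorbell_try_core_ipi_stub(int cpu)
{
	return cpu < 4;			/* pretend CPUs 0-3 are our SMT siblings */
}

static void ic_cause_ipi_stub(int cpu)
{
	printf("cpu %d: interrupt controller (xics/xive) IPI\n", cpu);
}

static void dbell_or_ic_cause_ipi(int cpu)
{
	if (doorbell_try_core_ipi_stub(cpu)) {
		printf("cpu %d: msgsndp doorbell, stays in the core\n", cpu);
		return;
	}
	ic_cause_ipi_stub(cpu);		/* remote core: fall back to xics/xive */
}

int main(void)
{
	dbell_or_ic_cause_ipi(1);	/* sibling: doorbell */
	dbell_or_ic_cause_ipi(8);	/* other core: interrupt controller */
	return 0;
}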
Signed-off-by: Nicholas Piggin Tested-by: CĆ©dric Le Goater Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200726035155.1424103-3-npiggin@gmail.com --- arch/powerpc/platforms/pseries/smp.c | 54 ++++++++++++++++++++++++------------ 1 file changed, 36 insertions(+), 18 deletions(-) diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 7ebacac03dc3..a1cb861154e9 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -162,13 +162,16 @@ static int pseries_smp_prepare_cpu(int cpu) return 0; } -static void smp_pseries_cause_ipi(int cpu) +/* Cause IPI as setup by the interrupt controller (xics or xive) */ +static void (*ic_cause_ipi)(int cpu) __ro_after_init; + +/* Use msgsndp doorbells target is a sibling, else use interrupt controller */ +static void dbell_or_ic_cause_ipi(int cpu) { - /* POWER9 should not use this handler */ if (doorbell_try_core_ipi(cpu)) return; - icp_ops->cause_ipi(cpu); + ic_cause_ipi(cpu); } static int pseries_cause_nmi_ipi(int cpu) @@ -192,26 +195,41 @@ static int pseries_cause_nmi_ipi(int cpu) return 0; } -static __init void pSeries_smp_probe_xics(void) -{ - xics_smp_probe(); - - if (cpu_has_feature(CPU_FTR_DBELL) && !is_secure_guest()) - smp_ops->cause_ipi = smp_pseries_cause_ipi; - else - smp_ops->cause_ipi = icp_ops->cause_ipi; -} - static __init void pSeries_smp_probe(void) { if (xive_enabled()) - /* - * Don't use P9 doorbells when XIVE is enabled. IPIs - * using MMIOs should be faster - */ xive_smp_probe(); else - pSeries_smp_probe_xics(); + xics_smp_probe(); + + /* No doorbell facility, must use the interrupt controller for IPIs */ + if (!cpu_has_feature(CPU_FTR_DBELL)) + return; + + /* Doorbells can only be used for IPIs between SMT siblings */ + if (!cpu_has_feature(CPU_FTR_SMT)) + return; + + /* + * KVM emulates doorbells by disabling FSCR[MSGP] so msgsndp faults + * to the hypervisor which then reads the instruction from guest + * memory. This can't be done if the guest is secure, so don't use + * doorbells in secure guests. + * + * Under PowerVM, FSCR[MSGP] is enabled so doorbells could be used + * by secure guests if we distinguished this from KVM. + */ + if (is_secure_guest()) + return; + + /* + * The guest can use doobells for SMT sibling IPIs, which stay in + * the core rather than going to the interrupt controller. This + * tends to be slower under KVM where doorbells are emulated, but + * faster for PowerVM where they're enabled. + */ + ic_cause_ipi = smp_ops->cause_ipi; + smp_ops->cause_ipi = dbell_or_ic_cause_ipi; } static struct smp_ops_t pseries_smp_ops = { -- cgit v1.2.3 From 107c55005fbd5243ee31fb13b6f166cde9e3ade1 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sun, 26 Jul 2020 13:51:55 +1000 Subject: powerpc/pseries: Add KVM guest doorbell restrictions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit KVM guests have certain restrictions and performance quirks when using doorbells. This patch moves the EPAPR KVM guest test so it can be shared with PSERIES, and uses that in doorbell setup code to apply the KVM guest quirks and improves IPI performance for two cases: - PowerVM guests may now use doorbells even if they are secure. - KVM guests no longer use doorbells if XIVE is available. 
There is a valid complaint that "KVM guest" is not a very reasonable thing to test for, it's preferable for the hypervisor to advertise particular behaviours to the guest so they could change if the hypervisor implementation or configuration changes. However in this case we were already assuming a KVM guest worst case, so this patch is about containing those quirks. If KVM later advertises fast doorbells, we should test for that and override the quirks. Signed-off-by: Nicholas Piggin Tested-by: CĆ©dric Le Goater Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200726035155.1424103-4-npiggin@gmail.com --- arch/powerpc/include/asm/firmware.h | 6 ++++++ arch/powerpc/include/asm/kvm_para.h | 26 +++--------------------- arch/powerpc/kernel/Makefile | 5 ++--- arch/powerpc/kernel/firmware.c | 19 ++++++++++++++++++ arch/powerpc/platforms/pseries/smp.c | 38 ++++++++++++++++++++++-------------- 5 files changed, 53 insertions(+), 41 deletions(-) diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h index 6003c2e533a0..f67efbaba17f 100644 --- a/arch/powerpc/include/asm/firmware.h +++ b/arch/powerpc/include/asm/firmware.h @@ -132,6 +132,12 @@ extern int ibm_nmi_interlock_token; extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup; +#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_GUEST) +bool is_kvm_guest(void); +#else +static inline bool is_kvm_guest(void) { return false; } +#endif + #ifdef CONFIG_PPC_PSERIES void pseries_probe_fw_features(void); #else diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h index 9c1f6b4b9bbf..744612054c94 100644 --- a/arch/powerpc/include/asm/kvm_para.h +++ b/arch/powerpc/include/asm/kvm_para.h @@ -8,35 +8,15 @@ #ifndef __POWERPC_KVM_PARA_H__ #define __POWERPC_KVM_PARA_H__ -#include - -#ifdef CONFIG_KVM_GUEST - -#include - -static inline int kvm_para_available(void) -{ - struct device_node *hyper_node; - - hyper_node = of_find_node_by_path("/hypervisor"); - if (!hyper_node) - return 0; +#include - if (!of_device_is_compatible(hyper_node, "linux,kvm")) - return 0; - - return 1; -} - -#else +#include static inline int kvm_para_available(void) { - return 0; + return IS_ENABLED(CONFIG_KVM_GUEST) && is_kvm_guest(); } -#endif - static inline unsigned int kvm_arch_para_features(void) { unsigned long r; diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index c5211bdcf1b6..d4d5946224f8 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -45,11 +45,10 @@ obj-y := cputable.o syscalls.o \ signal.o sysfs.o cacheinfo.o time.o \ prom.o traps.o setup-common.o \ udbg.o misc.o io.o misc_$(BITS).o \ - of_platform.o prom_parse.o + of_platform.o prom_parse.o firmware.o obj-y += ptrace/ obj-$(CONFIG_PPC64) += setup_64.o \ - paca.o nvram_64.o firmware.o note.o \ - syscall_64.o + paca.o nvram_64.o note.o syscall_64.o obj-$(CONFIG_COMPAT) += sys_ppc32.o signal_32.o obj-$(CONFIG_VDSO32) += vdso32/ obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o diff --git a/arch/powerpc/kernel/firmware.c b/arch/powerpc/kernel/firmware.c index cc4a5e3f51f1..fe48d319d490 100644 --- a/arch/powerpc/kernel/firmware.c +++ b/arch/powerpc/kernel/firmware.c @@ -11,8 +11,27 @@ #include #include +#include #include +#ifdef CONFIG_PPC64 unsigned long powerpc_firmware_features __read_mostly; EXPORT_SYMBOL_GPL(powerpc_firmware_features); +#endif + +#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_GUEST) +bool is_kvm_guest(void) +{ + struct device_node *hyper_node; + + hyper_node 
= of_find_node_by_path("/hypervisor"); + if (!hyper_node) + return 0; + + if (!of_device_is_compatible(hyper_node, "linux,kvm")) + return 0; + + return 1; +} +#endif diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index a1cb861154e9..92922491a81c 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -210,24 +210,32 @@ static __init void pSeries_smp_probe(void) if (!cpu_has_feature(CPU_FTR_SMT)) return; - /* - * KVM emulates doorbells by disabling FSCR[MSGP] so msgsndp faults - * to the hypervisor which then reads the instruction from guest - * memory. This can't be done if the guest is secure, so don't use - * doorbells in secure guests. - * - * Under PowerVM, FSCR[MSGP] is enabled so doorbells could be used - * by secure guests if we distinguished this from KVM. - */ - if (is_secure_guest()) - return; + if (is_kvm_guest()) { + /* + * KVM emulates doorbells by disabling FSCR[MSGP] so msgsndp + * faults to the hypervisor which then reads the instruction + * from guest memory, which tends to be slower than using XIVE. + */ + if (xive_enabled()) + return; + + /* + * XICS hcalls aren't as fast, so we can use msgsndp (which + * also helps exercise KVM emulation), however KVM can't + * emulate secure guests because it can't read the instruction + * out of their memory. + */ + if (is_secure_guest()) + return; + } /* - * The guest can use doobells for SMT sibling IPIs, which stay in - * the core rather than going to the interrupt controller. This - * tends to be slower under KVM where doorbells are emulated, but - * faster for PowerVM where they're enabled. + * Under PowerVM, FSCR[MSGP] is enabled as guest vCPU siblings are + * gang scheduled on the same physical core, so doorbells are always + * faster than the interrupt controller, and they can be used by + * secure guests. */ + ic_cause_ipi = smp_ops->cause_ipi; smp_ops->cause_ipi = dbell_or_ic_cause_ipi; } -- cgit v1.2.3 From 128d3d0210076232b7d54c361082c8ee17e4b669 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Mon, 27 Jul 2020 09:30:35 +0530 Subject: selftests/powerpc: Move pkey helpers to headers This moves all the pkey-related helpers to a new header file and also a helper to print error messages in signal handlers to the existing utils header file. Signed-off-by: Sandipan Das Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/28e633fa9ec1a6500c12188e09ea1887b10a10c1.1595821792.git.sandipan@linux.ibm.com --- tools/testing/selftests/powerpc/include/pkeys.h | 108 +++++++++++++++++++++ tools/testing/selftests/powerpc/include/utils.h | 4 + .../testing/selftests/powerpc/mm/pkey_exec_prot.c | 100 +------------------ 3 files changed, 114 insertions(+), 98 deletions(-) create mode 100644 tools/testing/selftests/powerpc/include/pkeys.h diff --git a/tools/testing/selftests/powerpc/include/pkeys.h b/tools/testing/selftests/powerpc/include/pkeys.h new file mode 100644 index 000000000000..9b53a97e664e --- /dev/null +++ b/tools/testing/selftests/powerpc/include/pkeys.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2020, Sandipan Das, IBM Corp. + */ + +#ifndef _SELFTESTS_POWERPC_PKEYS_H +#define _SELFTESTS_POWERPC_PKEYS_H + +#include + +#include "reg.h" +#include "utils.h" + +/* + * Older versions of libc use the Intel-specific access rights. + * Hence, override the definitions as they might be incorrect. 
+ */ +#undef PKEY_DISABLE_ACCESS +#define PKEY_DISABLE_ACCESS 0x3 + +#undef PKEY_DISABLE_WRITE +#define PKEY_DISABLE_WRITE 0x2 + +#undef PKEY_DISABLE_EXECUTE +#define PKEY_DISABLE_EXECUTE 0x4 + +/* Older versions of libc do not not define this */ +#ifndef SEGV_PKUERR +#define SEGV_PKUERR 4 +#endif + +#define SI_PKEY_OFFSET 0x20 + +#define SYS_pkey_mprotect 386 +#define SYS_pkey_alloc 384 +#define SYS_pkey_free 385 + +#define PKEY_BITS_PER_PKEY 2 +#define NR_PKEYS 32 +#define PKEY_BITS_MASK ((1UL << PKEY_BITS_PER_PKEY) - 1) + +inline unsigned long pkeyreg_get(void) +{ + return mfspr(SPRN_AMR); +} + +inline void pkeyreg_set(unsigned long amr) +{ + set_amr(amr); +} + +void pkey_set_rights(int pkey, unsigned long rights) +{ + unsigned long amr, shift; + + shift = (NR_PKEYS - pkey - 1) * PKEY_BITS_PER_PKEY; + amr = pkeyreg_get(); + amr &= ~(PKEY_BITS_MASK << shift); + amr |= (rights & PKEY_BITS_MASK) << shift; + pkeyreg_set(amr); +} + +int sys_pkey_mprotect(void *addr, size_t len, int prot, int pkey) +{ + return syscall(SYS_pkey_mprotect, addr, len, prot, pkey); +} + +int sys_pkey_alloc(unsigned long flags, unsigned long rights) +{ + return syscall(SYS_pkey_alloc, flags, rights); +} + +int sys_pkey_free(int pkey) +{ + return syscall(SYS_pkey_free, pkey); +} + +int pkeys_unsupported(void) +{ + bool hash_mmu = false; + int pkey; + + /* Protection keys are currently supported on Hash MMU only */ + FAIL_IF(using_hash_mmu(&hash_mmu)); + SKIP_IF(!hash_mmu); + + /* Check if the system call is supported */ + pkey = sys_pkey_alloc(0, 0); + SKIP_IF(pkey < 0); + sys_pkey_free(pkey); + + return 0; +} + +int siginfo_pkey(siginfo_t *si) +{ + /* + * In older versions of libc, siginfo_t does not have si_pkey as + * a member. + */ +#ifdef si_pkey + return si->si_pkey; +#else + return *((int *)(((char *) si) + SI_PKEY_OFFSET)); +#endif +} + +#endif /* _SELFTESTS_POWERPC_PKEYS_H */ diff --git a/tools/testing/selftests/powerpc/include/utils.h b/tools/testing/selftests/powerpc/include/utils.h index 9dbe607cc5ec..7f259f36e23b 100644 --- a/tools/testing/selftests/powerpc/include/utils.h +++ b/tools/testing/selftests/powerpc/include/utils.h @@ -97,6 +97,10 @@ do { \ #define _str(s) #s #define str(s) _str(s) +#define sigsafe_err(msg) ({ \ + ssize_t nbytes __attribute__((unused)); \ + nbytes = write(STDERR_FILENO, msg, strlen(msg)); }) + /* POWER9 feature */ #ifndef PPC_FEATURE2_ARCH_3_00 #define PPC_FEATURE2_ARCH_3_00 0x00800000 diff --git a/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c b/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c index 7c7c93425c5e..1253ad6afba2 100644 --- a/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c +++ b/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c @@ -14,83 +14,13 @@ #include #include -#include -#include "reg.h" -#include "utils.h" - -/* - * Older versions of libc use the Intel-specific access rights. - * Hence, override the definitions as they might be incorrect. 
- */ -#undef PKEY_DISABLE_ACCESS -#define PKEY_DISABLE_ACCESS 0x3 - -#undef PKEY_DISABLE_WRITE -#define PKEY_DISABLE_WRITE 0x2 - -#undef PKEY_DISABLE_EXECUTE -#define PKEY_DISABLE_EXECUTE 0x4 - -/* Older versions of libc do not not define this */ -#ifndef SEGV_PKUERR -#define SEGV_PKUERR 4 -#endif - -#define SI_PKEY_OFFSET 0x20 - -#define SYS_pkey_mprotect 386 -#define SYS_pkey_alloc 384 -#define SYS_pkey_free 385 - -#define PKEY_BITS_PER_PKEY 2 -#define NR_PKEYS 32 -#define PKEY_BITS_MASK ((1UL << PKEY_BITS_PER_PKEY) - 1) +#include "pkeys.h" #define PPC_INST_NOP 0x60000000 #define PPC_INST_TRAP 0x7fe00008 #define PPC_INST_BLR 0x4e800020 -#define sigsafe_err(msg) ({ \ - ssize_t nbytes __attribute__((unused)); \ - nbytes = write(STDERR_FILENO, msg, strlen(msg)); }) - -static inline unsigned long pkeyreg_get(void) -{ - return mfspr(SPRN_AMR); -} - -static inline void pkeyreg_set(unsigned long amr) -{ - set_amr(amr); -} - -static void pkey_set_rights(int pkey, unsigned long rights) -{ - unsigned long amr, shift; - - shift = (NR_PKEYS - pkey - 1) * PKEY_BITS_PER_PKEY; - amr = pkeyreg_get(); - amr &= ~(PKEY_BITS_MASK << shift); - amr |= (rights & PKEY_BITS_MASK) << shift; - pkeyreg_set(amr); -} - -static int sys_pkey_mprotect(void *addr, size_t len, int prot, int pkey) -{ - return syscall(SYS_pkey_mprotect, addr, len, prot, pkey); -} - -static int sys_pkey_alloc(unsigned long flags, unsigned long rights) -{ - return syscall(SYS_pkey_alloc, flags, rights); -} - -static int sys_pkey_free(int pkey) -{ - return syscall(SYS_pkey_free, pkey); -} - static volatile sig_atomic_t fault_pkey, fault_code, fault_type; static volatile sig_atomic_t remaining_faults; static volatile unsigned int *fault_addr; @@ -110,16 +40,7 @@ static void segv_handler(int signum, siginfo_t *sinfo, void *ctx) { int signal_pkey; - /* - * In older versions of libc, siginfo_t does not have si_pkey as - * a member. - */ -#ifdef si_pkey - signal_pkey = sinfo->si_pkey; -#else - signal_pkey = *((int *)(((char *) sinfo) + SI_PKEY_OFFSET)); -#endif - + signal_pkey = siginfo_pkey(sinfo); fault_code = sinfo->si_code; /* Check if this fault originated from the expected address */ @@ -178,23 +99,6 @@ static void segv_handler(int signum, siginfo_t *sinfo, void *ctx) remaining_faults--; } -static int pkeys_unsupported(void) -{ - bool hash_mmu = false; - int pkey; - - /* Protection keys are currently supported on Hash MMU only */ - FAIL_IF(using_hash_mmu(&hash_mmu)); - SKIP_IF(!hash_mmu); - - /* Check if the system call is supported */ - pkey = sys_pkey_alloc(0, 0); - SKIP_IF(pkey < 0); - sys_pkey_free(pkey); - - return 0; -} - static int test(void) { struct sigaction segv_act, trap_act; -- cgit v1.2.3 From 264d7fccc4711328a19f07e6bd57aee4c68803aa Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Mon, 27 Jul 2020 09:30:36 +0530 Subject: selftests/powerpc: Add pkey helpers for rights This adds some new pkey-related helper to print access rights of a pkey in the "rwx" format and to generate different valid combinations of pkey rights starting from a given combination. 
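For illustration only (not part of the patch): a minimal sketch of how the two new helpers are meant to be combined, mirroring the loop this series adds to pkey_exec_prot.c below. pkey_rights() and next_pkey_rights() are the helpers introduced here; sys_pkey_alloc()/sys_pkey_free() come from the existing pkeys.h.

    #include <stdio.h>
    #include "pkeys.h"

    static void walk_pkey_rights(void)
    {
            unsigned long rights = 0;

            /* Visits all six valid combinations of read/write/execute
             * restrictions, starting from "no restrictions". */
            do {
                    int pkey = sys_pkey_alloc(0, rights);

                    if (pkey >= 0) {
                            printf("pkey %d permissions are %s\n",
                                   pkey, pkey_rights(rights));
                            sys_pkey_free(pkey);
                    }

                    rights = next_pkey_rights(rights);
            } while (rights);
    }
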
Signed-off-by: Sandipan Das Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/6cc1c7d1f686618668a3e090f1d0c2a4cd9dea3f.1595821792.git.sandipan@linux.ibm.com --- tools/testing/selftests/powerpc/include/pkeys.h | 28 +++++++++++++++++ .../testing/selftests/powerpc/mm/pkey_exec_prot.c | 36 ++++++++++++---------- 2 files changed, 48 insertions(+), 16 deletions(-) diff --git a/tools/testing/selftests/powerpc/include/pkeys.h b/tools/testing/selftests/powerpc/include/pkeys.h index 9b53a97e664e..6ba95039a034 100644 --- a/tools/testing/selftests/powerpc/include/pkeys.h +++ b/tools/testing/selftests/powerpc/include/pkeys.h @@ -105,4 +105,32 @@ int siginfo_pkey(siginfo_t *si) #endif } +#define pkey_rights(r) ({ \ + static char buf[4] = "rwx"; \ + unsigned int amr_bits; \ + if ((r) & PKEY_DISABLE_EXECUTE) \ + buf[2] = '-'; \ + amr_bits = (r) & PKEY_BITS_MASK; \ + if (amr_bits & PKEY_DISABLE_WRITE) \ + buf[1] = '-'; \ + if (amr_bits & PKEY_DISABLE_ACCESS & ~PKEY_DISABLE_WRITE) \ + buf[0] = '-'; \ + buf; \ +}) + +unsigned long next_pkey_rights(unsigned long rights) +{ + if (rights == PKEY_DISABLE_ACCESS) + return PKEY_DISABLE_EXECUTE; + else if (rights == (PKEY_DISABLE_ACCESS | PKEY_DISABLE_EXECUTE)) + return 0; + + if ((rights & PKEY_BITS_MASK) == 0) + rights |= PKEY_DISABLE_WRITE; + else if ((rights & PKEY_BITS_MASK) == PKEY_DISABLE_WRITE) + rights |= PKEY_DISABLE_ACCESS; + + return rights; +} + #endif /* _SELFTESTS_POWERPC_PKEYS_H */ diff --git a/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c b/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c index 1253ad6afba2..18ebfe6bae1c 100644 --- a/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c +++ b/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c @@ -102,6 +102,7 @@ static void segv_handler(int signum, siginfo_t *sinfo, void *ctx) static int test(void) { struct sigaction segv_act, trap_act; + unsigned long rights; int pkey, ret, i; ret = pkeys_unsupported(); @@ -150,7 +151,8 @@ static int test(void) insns[numinsns - 1] = PPC_INST_BLR; /* Allocate a pkey that restricts execution */ - pkey = sys_pkey_alloc(0, PKEY_DISABLE_EXECUTE); + rights = PKEY_DISABLE_EXECUTE; + pkey = sys_pkey_alloc(0, rights); FAIL_IF(pkey < 0); /* @@ -175,8 +177,8 @@ static int test(void) */ remaining_faults = 0; FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); - printf("read from %p, pkey is execute-disabled, access-enabled\n", - (void *) fault_addr); + printf("read from %p, pkey permissions are %s\n", fault_addr, + pkey_rights(rights)); i = *fault_addr; FAIL_IF(remaining_faults != 0); @@ -192,12 +194,13 @@ static int test(void) */ remaining_faults = 1; FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); - printf("write to %p, pkey is execute-disabled, access-enabled\n", - (void *) fault_addr); + printf("write to %p, pkey permissions are %s\n", fault_addr, + pkey_rights(rights)); *fault_addr = PPC_INST_TRAP; FAIL_IF(remaining_faults != 0 || fault_code != SEGV_ACCERR); /* The following three cases will generate SEGV_PKUERR */ + rights |= PKEY_DISABLE_ACCESS; fault_type = PKEY_DISABLE_ACCESS; fault_pkey = pkey; @@ -211,9 +214,9 @@ static int test(void) */ remaining_faults = 1; FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); - printf("read from %p, pkey is execute-disabled, access-disabled\n", - (void *) fault_addr); - pkey_set_rights(pkey, PKEY_DISABLE_ACCESS); + pkey_set_rights(pkey, rights); + printf("read from %p, pkey permissions are %s\n", fault_addr, + pkey_rights(rights)); i = *fault_addr; 
FAIL_IF(remaining_faults != 0 || fault_code != SEGV_PKUERR); @@ -228,9 +231,9 @@ static int test(void) */ remaining_faults = 2; FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); - printf("write to %p, pkey is execute-disabled, access-disabled\n", - (void *) fault_addr); - pkey_set_rights(pkey, PKEY_DISABLE_ACCESS); + pkey_set_rights(pkey, rights); + printf("write to %p, pkey permissions are %s\n", fault_addr, + pkey_rights(rights)); *fault_addr = PPC_INST_NOP; FAIL_IF(remaining_faults != 0 || fault_code != SEGV_ACCERR); @@ -251,8 +254,8 @@ static int test(void) remaining_faults = 1; FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); pkey_set_rights(pkey, PKEY_DISABLE_ACCESS); - printf("execute at %p, pkey is execute-disabled, access-disabled\n", - (void *) fault_addr); + printf("execute at %p, pkey permissions are %s\n", fault_addr, + pkey_rights(rights)); asm volatile("mtctr %0; bctrl" : : "r"(insns)); FAIL_IF(remaining_faults != 0 || fault_code != SEGV_PKUERR); @@ -261,7 +264,8 @@ static int test(void) * fully permissive. */ sys_pkey_free(pkey); - pkey = sys_pkey_alloc(0, 0); + rights = 0; + pkey = sys_pkey_alloc(0, rights); /* * Jump to the executable region when AMR bits are not set @@ -274,8 +278,8 @@ static int test(void) fault_pkey = pkey; remaining_faults = 0; FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); - printf("execute at %p, pkey is execute-enabled, access-enabled\n", - (void *) fault_addr); + printf("execute at %p, pkey permissions are %s\n", fault_addr, + pkey_rights(rights)); asm volatile("mtctr %0; bctrl" : : "r"(insns)); FAIL_IF(remaining_faults != 0); -- cgit v1.2.3 From 03634bbf5d8a6f2d97e6150a1b8ff03675badac3 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Mon, 27 Jul 2020 09:30:37 +0530 Subject: selftests/powerpc: Harden test for execute-disabled pkeys Commit 192b6a7805989 ("powerpc/book3s64/pkeys: Fix pkey_access_permitted() for execute disable pkey") fixed a bug that caused repetitive faults for pkeys with no execute rights alongside some combination of read and write rights. This removes the last two cases of the test, which check the behaviour of pkeys with read, write but no execute rights and all the rights, in favour of checking all the possible combinations of read, write and execute rights to be able to detect bugs like the one mentioned above. Signed-off-by: Sandipan Das Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/db467500f8af47727bba6b35796e8974a78b71e5.1595821792.git.sandipan@linux.ibm.com --- .../testing/selftests/powerpc/mm/pkey_exec_prot.c | 84 +++++++++++----------- 1 file changed, 41 insertions(+), 43 deletions(-) diff --git a/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c b/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c index 18ebfe6bae1c..9e5c7f3f498a 100644 --- a/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c +++ b/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c @@ -237,55 +237,53 @@ static int test(void) *fault_addr = PPC_INST_NOP; FAIL_IF(remaining_faults != 0 || fault_code != SEGV_ACCERR); - /* - * Jump to the executable region when AMR bits are set i.e. - * the pkey permits neither read nor write access. - * - * This should generate a pkey fault based on IAMR bits which - * are set to not permit execution. AMR bits should not affect - * execution. - * - * This also checks if the overwrite of the first instruction - * word from a trap to a no-op succeeded. 
- */ - fault_addr = insns; - fault_type = PKEY_DISABLE_EXECUTE; - fault_pkey = pkey; - remaining_faults = 1; - FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); - pkey_set_rights(pkey, PKEY_DISABLE_ACCESS); - printf("execute at %p, pkey permissions are %s\n", fault_addr, - pkey_rights(rights)); - asm volatile("mtctr %0; bctrl" : : "r"(insns)); - FAIL_IF(remaining_faults != 0 || fault_code != SEGV_PKUERR); - - /* - * Free the current pkey and allocate a new one that is - * fully permissive. - */ + /* Free the current pkey */ sys_pkey_free(pkey); + rights = 0; - pkey = sys_pkey_alloc(0, rights); + do { + /* + * Allocate pkeys with all valid combinations of read, + * write and execute restrictions. + */ + pkey = sys_pkey_alloc(0, rights); + FAIL_IF(pkey < 0); + + /* + * Jump to the executable region. AMR bits may or may not + * be set but they should not affect execution. + * + * This should generate pkey faults based on IAMR bits which + * may be set to restrict execution. + * + * The first iteration also checks if the overwrite of the + * first instruction word from a trap to a no-op succeeded. + */ + fault_pkey = pkey; + fault_type = -1; + remaining_faults = 0; + if (rights & PKEY_DISABLE_EXECUTE) { + fault_type = PKEY_DISABLE_EXECUTE; + remaining_faults = 1; + } - /* - * Jump to the executable region when AMR bits are not set - * i.e. the pkey permits read and write access. - * - * This should not generate any faults as the IAMR bits are - * also not set and hence will the pkey will not restrict - * execution. - */ - fault_pkey = pkey; - remaining_faults = 0; - FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); - printf("execute at %p, pkey permissions are %s\n", fault_addr, - pkey_rights(rights)); - asm volatile("mtctr %0; bctrl" : : "r"(insns)); - FAIL_IF(remaining_faults != 0); + FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); + printf("execute at %p, pkey permissions are %s\n", fault_addr, + pkey_rights(rights)); + asm volatile("mtctr %0; bctrl" : : "r"(insns)); + FAIL_IF(remaining_faults != 0); + if (rights & PKEY_DISABLE_EXECUTE) + FAIL_IF(fault_code != SEGV_PKUERR); + + /* Free the current pkey */ + sys_pkey_free(pkey); + + /* Find next valid combination of pkey rights */ + rights = next_pkey_rights(rights); + } while (rights); /* Cleanup */ munmap((void *) insns, pgsize); - sys_pkey_free(pkey); return 0; } -- cgit v1.2.3 From e3333c599482245d08002725cc1b353e4963fa26 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Mon, 27 Jul 2020 09:30:38 +0530 Subject: selftests/powerpc: Add helper to exit on failure This adds a helper similar to FAIL_IF() which lets a program exit with code 1 (to indicate failure) when the given condition is true. 
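A rough usage sketch, not part of the patch: the pkey_siginfo test added later in this series calls the new macro from pthread worker functions, where FAIL_IF()'s early return cannot report failure from a start routine. The worker below is hypothetical and only illustrates the difference.

    #include "pkeys.h"   /* pulls in utils.h, where FAIL_IF_EXIT() lives */

    static void *worker(void *unused)
    {
            int pkey = sys_pkey_alloc(0, 0);

            /* FAIL_IF() would try to "return 1" from a void * start
             * routine; FAIL_IF_EXIT() terminates the whole test with
             * exit status 1 instead. */
            FAIL_IF_EXIT(pkey < 0);

            sys_pkey_free(pkey);
            return NULL;
    }
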
Signed-off-by: Sandipan Das Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/dac282d5c2e96e7816dc522e4e20d56d7c79c898.1595821792.git.sandipan@linux.ibm.com --- tools/testing/selftests/powerpc/include/utils.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/testing/selftests/powerpc/include/utils.h b/tools/testing/selftests/powerpc/include/utils.h index 7f259f36e23b..69d16875802d 100644 --- a/tools/testing/selftests/powerpc/include/utils.h +++ b/tools/testing/selftests/powerpc/include/utils.h @@ -72,6 +72,15 @@ do { \ } \ } while (0) +#define FAIL_IF_EXIT(x) \ +do { \ + if ((x)) { \ + fprintf(stderr, \ + "[FAIL] Test FAILED on line %d\n", __LINE__); \ + _exit(1); \ + } \ +} while (0) + /* The test harness uses this, yes it's gross */ #define MAGIC_SKIP_RETURN_VALUE 99 -- cgit v1.2.3 From 743f3544fffb9662aaf550c8358a8c1b6fcae707 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Mon, 27 Jul 2020 09:30:39 +0530 Subject: selftests/powerpc: Add wrapper for gettid The gettid() syscall wrapper was first introduced in glibc 2.30. This adds a wrapper for use in distros running older versions. Suggested-by: Christophe Leroy Suggested-by: Michael Ellerman Signed-off-by: Sandipan Das Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/8ca3b0eeda989707815d1cf337cc33f090408965.1595821792.git.sandipan@linux.ibm.com --- tools/testing/selftests/powerpc/include/utils.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tools/testing/selftests/powerpc/include/utils.h b/tools/testing/selftests/powerpc/include/utils.h index 69d16875802d..71d2924f5b8b 100644 --- a/tools/testing/selftests/powerpc/include/utils.h +++ b/tools/testing/selftests/powerpc/include/utils.h @@ -42,6 +42,16 @@ int perf_event_enable(int fd); int perf_event_disable(int fd); int perf_event_reset(int fd); +#if !defined(__GLIBC_PREREQ) || !__GLIBC_PREREQ(2, 30) +#include +#include + +static inline pid_t gettid(void) +{ + return syscall(SYS_gettid); +} +#endif + static inline bool have_hwcap(unsigned long ftr) { return ((unsigned long)get_auxv_entry(AT_HWCAP) & ftr) == ftr; -- cgit v1.2.3 From c27f2fd1705a7e19ef2dc2b986c0d1cde3c3dbe7 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Mon, 27 Jul 2020 09:30:40 +0530 Subject: selftests/powerpc: Add test for pkey siginfo verification Commit c46241a370a61 ("powerpc/pkeys: Check vma before returning key fault error to the user") fixes a bug which causes the kernel to set the wrong pkey in siginfo when a pkey fault occurs after two competing threads that have allocated different pkeys, one fully permissive and the other restrictive, attempt to protect a common page at the same time. This adds a test to detect the bug. 
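Condensed to its essentials, the race the new test provokes looks roughly like the sketch below. This is an illustration only: the real pkey_siginfo.c that follows runs this for a million iterations behind a barrier, covers read, write and execute variants, and perm_pkey/rest_pkey are assumed to have been allocated with sys_pkey_alloc() beforehand.

    #include <signal.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include "pkeys.h"

    static volatile unsigned int *page;     /* shared page, mmap()ed RWX */
    static int perm_pkey, rest_pkey;        /* permissive and restrictive */
    static size_t pgsize;

    static void handler(int sig, siginfo_t *si, void *ctx)
    {
            /* Whichever thread wins the race, the fault is taken through
             * the restrictive key, so siginfo must name it - the bug was
             * that it could name the permissive one instead. */
            if (si->si_code == SEGV_PKUERR && siginfo_pkey(si) != rest_pkey)
                    _exit(1);
            pkey_set_rights(rest_pkey, 0);  /* clear AMR, let access finish */
    }

    static void *protect_permissive(void *unused)
    {
            sys_pkey_mprotect((void *)page, pgsize,
                              PROT_READ | PROT_WRITE, perm_pkey);
            return NULL;
    }

    static void *protect_and_write(void *unused)
    {
            sys_pkey_mprotect((void *)page, pgsize,
                              PROT_READ | PROT_WRITE, rest_pkey);
            *page = 0;                      /* may take a pkey fault */
            return NULL;
    }
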
Signed-off-by: Sandipan Das Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/ce40b6ee270bda52e8f4088578ed2faf7d1d509a.1595821792.git.sandipan@linux.ibm.com --- tools/testing/selftests/powerpc/mm/.gitignore | 1 + tools/testing/selftests/powerpc/mm/Makefile | 5 +- tools/testing/selftests/powerpc/mm/pkey_siginfo.c | 333 ++++++++++++++++++++++ 3 files changed, 338 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/powerpc/mm/pkey_siginfo.c diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore index 8d041f508a51..0323892a8032 100644 --- a/tools/testing/selftests/powerpc/mm/.gitignore +++ b/tools/testing/selftests/powerpc/mm/.gitignore @@ -8,3 +8,4 @@ large_vm_fork_separation bad_accesses tlbie_test pkey_exec_prot +pkey_siginfo diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile index 5a86d59441dc..740f7854548e 100644 --- a/tools/testing/selftests/powerpc/mm/Makefile +++ b/tools/testing/selftests/powerpc/mm/Makefile @@ -3,7 +3,8 @@ noarg: $(MAKE) -C ../ TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot segv_errors wild_bctr \ - large_vm_fork_separation bad_accesses pkey_exec_prot + large_vm_fork_separation bad_accesses pkey_exec_prot \ + pkey_siginfo TEST_GEN_PROGS_EXTENDED := tlbie_test TEST_GEN_FILES := tempfile @@ -16,8 +17,10 @@ $(OUTPUT)/wild_bctr: CFLAGS += -m64 $(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64 $(OUTPUT)/bad_accesses: CFLAGS += -m64 $(OUTPUT)/pkey_exec_prot: CFLAGS += -m64 +$(OUTPUT)/pkey_siginfo: CFLAGS += -m64 $(OUTPUT)/tempfile: dd if=/dev/zero of=$@ bs=64k count=1 $(OUTPUT)/tlbie_test: LDLIBS += -lpthread +$(OUTPUT)/pkey_siginfo: LDLIBS += -lpthread diff --git a/tools/testing/selftests/powerpc/mm/pkey_siginfo.c b/tools/testing/selftests/powerpc/mm/pkey_siginfo.c new file mode 100644 index 000000000000..4f815d7c1214 --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/pkey_siginfo.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2020, Sandipan Das, IBM Corp. + * + * Test if the signal information reports the correct memory protection + * key upon getting a key access violation fault for a page that was + * attempted to be protected by two different keys from two competing + * threads at the same time. 
+ */ + +#define _GNU_SOURCE +#include +#include +#include +#include + +#include +#include +#include + +#include "pkeys.h" + +#define PPC_INST_NOP 0x60000000 +#define PPC_INST_BLR 0x4e800020 +#define PROT_RWX (PROT_READ | PROT_WRITE | PROT_EXEC) + +#define NUM_ITERATIONS 1000000 + +static volatile sig_atomic_t perm_pkey, rest_pkey; +static volatile sig_atomic_t rights, fault_count; +static volatile unsigned int *volatile fault_addr; +static pthread_barrier_t iteration_barrier; + +static void segv_handler(int signum, siginfo_t *sinfo, void *ctx) +{ + void *pgstart; + size_t pgsize; + int pkey; + + pkey = siginfo_pkey(sinfo); + + /* Check if this fault originated from a pkey access violation */ + if (sinfo->si_code != SEGV_PKUERR) { + sigsafe_err("got a fault for an unexpected reason\n"); + _exit(1); + } + + /* Check if this fault originated from the expected address */ + if (sinfo->si_addr != (void *) fault_addr) { + sigsafe_err("got a fault for an unexpected address\n"); + _exit(1); + } + + /* Check if this fault originated from the restrictive pkey */ + if (pkey != rest_pkey) { + sigsafe_err("got a fault for an unexpected pkey\n"); + _exit(1); + } + + /* Check if too many faults have occurred for the same iteration */ + if (fault_count > 0) { + sigsafe_err("got too many faults for the same address\n"); + _exit(1); + } + + pgsize = getpagesize(); + pgstart = (void *) ((unsigned long) fault_addr & ~(pgsize - 1)); + + /* + * If the current fault occurred due to lack of execute rights, + * reassociate the page with the exec-only pkey since execute + * rights cannot be changed directly for the faulting pkey as + * IAMR is inaccessible from userspace. + * + * Otherwise, if the current fault occurred due to lack of + * read-write rights, change the AMR permission bits for the + * pkey. + * + * This will let the test continue. 
+ */ + if (rights == PKEY_DISABLE_EXECUTE && + mprotect(pgstart, pgsize, PROT_EXEC)) + _exit(1); + else + pkey_set_rights(pkey, 0); + + fault_count++; +} + +struct region { + unsigned long rights; + unsigned int *base; + size_t size; +}; + +static void *protect(void *p) +{ + unsigned long rights; + unsigned int *base; + size_t size; + int tid, i; + + tid = gettid(); + base = ((struct region *) p)->base; + size = ((struct region *) p)->size; + FAIL_IF_EXIT(!base); + + /* No read, write and execute restrictions */ + rights = 0; + + printf("tid %d, pkey permissions are %s\n", tid, pkey_rights(rights)); + + /* Allocate the permissive pkey */ + perm_pkey = sys_pkey_alloc(0, rights); + FAIL_IF_EXIT(perm_pkey < 0); + + /* + * Repeatedly try to protect the common region with a permissive + * pkey + */ + for (i = 0; i < NUM_ITERATIONS; i++) { + /* + * Wait until the other thread has finished allocating the + * restrictive pkey or until the next iteration has begun + */ + pthread_barrier_wait(&iteration_barrier); + + /* Try to associate the permissive pkey with the region */ + FAIL_IF_EXIT(sys_pkey_mprotect(base, size, PROT_RWX, + perm_pkey)); + } + + /* Free the permissive pkey */ + sys_pkey_free(perm_pkey); + + return NULL; +} + +static void *protect_access(void *p) +{ + size_t size, numinsns; + unsigned int *base; + int tid, i; + + tid = gettid(); + base = ((struct region *) p)->base; + size = ((struct region *) p)->size; + rights = ((struct region *) p)->rights; + numinsns = size / sizeof(base[0]); + FAIL_IF_EXIT(!base); + + /* Allocate the restrictive pkey */ + rest_pkey = sys_pkey_alloc(0, rights); + FAIL_IF_EXIT(rest_pkey < 0); + + printf("tid %d, pkey permissions are %s\n", tid, pkey_rights(rights)); + printf("tid %d, %s randomly in range [%p, %p]\n", tid, + (rights == PKEY_DISABLE_EXECUTE) ? "execute" : + (rights == PKEY_DISABLE_WRITE) ? 
"write" : "read", + base, base + numinsns); + + /* + * Repeatedly try to protect the common region with a restrictive + * pkey and read, write or execute from it + */ + for (i = 0; i < NUM_ITERATIONS; i++) { + /* + * Wait until the other thread has finished allocating the + * permissive pkey or until the next iteration has begun + */ + pthread_barrier_wait(&iteration_barrier); + + /* Try to associate the restrictive pkey with the region */ + FAIL_IF_EXIT(sys_pkey_mprotect(base, size, PROT_RWX, + rest_pkey)); + + /* Choose a random instruction word address from the region */ + fault_addr = base + (rand() % numinsns); + fault_count = 0; + + switch (rights) { + /* Read protection test */ + case PKEY_DISABLE_ACCESS: + /* + * Read an instruction word from the region and + * verify if it has not been overwritten to + * something unexpected + */ + FAIL_IF_EXIT(*fault_addr != PPC_INST_NOP && + *fault_addr != PPC_INST_BLR); + break; + + /* Write protection test */ + case PKEY_DISABLE_WRITE: + /* + * Write an instruction word to the region and + * verify if the overwrite has succeeded + */ + *fault_addr = PPC_INST_BLR; + FAIL_IF_EXIT(*fault_addr != PPC_INST_BLR); + break; + + /* Execute protection test */ + case PKEY_DISABLE_EXECUTE: + /* Jump to the region and execute instructions */ + asm volatile( + "mtctr %0; bctrl" + : : "r"(fault_addr) : "ctr", "lr"); + break; + } + + /* + * Restore the restrictions originally imposed by the + * restrictive pkey as the signal handler would have + * cleared out the corresponding AMR bits + */ + pkey_set_rights(rest_pkey, rights); + } + + /* Free restrictive pkey */ + sys_pkey_free(rest_pkey); + + return NULL; +} + +static void reset_pkeys(unsigned long rights) +{ + int pkeys[NR_PKEYS], i; + + /* Exhaustively allocate all available pkeys */ + for (i = 0; i < NR_PKEYS; i++) + pkeys[i] = sys_pkey_alloc(0, rights); + + /* Free all allocated pkeys */ + for (i = 0; i < NR_PKEYS; i++) + sys_pkey_free(pkeys[i]); +} + +static int test(void) +{ + pthread_t prot_thread, pacc_thread; + struct sigaction act; + pthread_attr_t attr; + size_t numinsns; + struct region r; + int ret, i; + + srand(time(NULL)); + ret = pkeys_unsupported(); + if (ret) + return ret; + + /* Allocate the region */ + r.size = getpagesize(); + r.base = mmap(NULL, r.size, PROT_RWX, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + FAIL_IF(r.base == MAP_FAILED); + + /* + * Fill the region with no-ops with a branch at the end + * for returning to the caller + */ + numinsns = r.size / sizeof(r.base[0]); + for (i = 0; i < numinsns - 1; i++) + r.base[i] = PPC_INST_NOP; + r.base[i] = PPC_INST_BLR; + + /* Setup SIGSEGV handler */ + act.sa_handler = 0; + act.sa_sigaction = segv_handler; + FAIL_IF(sigprocmask(SIG_SETMASK, 0, &act.sa_mask) != 0); + act.sa_flags = SA_SIGINFO; + act.sa_restorer = 0; + FAIL_IF(sigaction(SIGSEGV, &act, NULL) != 0); + + /* + * For these tests, the parent process should clear all bits of + * AMR and IAMR, i.e. impose no restrictions, for all available + * pkeys. This will be the base for the initial AMR and IAMR + * values for all the test thread pairs. + * + * If the AMR and IAMR bits of all available pkeys are cleared + * before running the tests and a fault is generated when + * attempting to read, write or execute instructions from a + * pkey protected region, the pkey responsible for this must be + * the one from the protect-and-access thread since the other + * one is fully permissive. 
Despite that, if the pkey reported + * by siginfo is not the restrictive pkey, then there must be a + * kernel bug. + */ + reset_pkeys(0); + + /* Setup barrier for protect and protect-and-access threads */ + FAIL_IF(pthread_attr_init(&attr) != 0); + FAIL_IF(pthread_barrier_init(&iteration_barrier, NULL, 2) != 0); + + /* Setup and start protect and protect-and-read threads */ + puts("starting thread pair (protect, protect-and-read)"); + r.rights = PKEY_DISABLE_ACCESS; + FAIL_IF(pthread_create(&prot_thread, &attr, &protect, &r) != 0); + FAIL_IF(pthread_create(&pacc_thread, &attr, &protect_access, &r) != 0); + FAIL_IF(pthread_join(prot_thread, NULL) != 0); + FAIL_IF(pthread_join(pacc_thread, NULL) != 0); + + /* Setup and start protect and protect-and-write threads */ + puts("starting thread pair (protect, protect-and-write)"); + r.rights = PKEY_DISABLE_WRITE; + FAIL_IF(pthread_create(&prot_thread, &attr, &protect, &r) != 0); + FAIL_IF(pthread_create(&pacc_thread, &attr, &protect_access, &r) != 0); + FAIL_IF(pthread_join(prot_thread, NULL) != 0); + FAIL_IF(pthread_join(pacc_thread, NULL) != 0); + + /* Setup and start protect and protect-and-execute threads */ + puts("starting thread pair (protect, protect-and-execute)"); + r.rights = PKEY_DISABLE_EXECUTE; + FAIL_IF(pthread_create(&prot_thread, &attr, &protect, &r) != 0); + FAIL_IF(pthread_create(&pacc_thread, &attr, &protect_access, &r) != 0); + FAIL_IF(pthread_join(prot_thread, NULL) != 0); + FAIL_IF(pthread_join(pacc_thread, NULL) != 0); + + /* Cleanup */ + FAIL_IF(pthread_attr_destroy(&attr) != 0); + FAIL_IF(pthread_barrier_destroy(&iteration_barrier) != 0); + munmap(r.base, r.size); + + return 0; +} + +int main(void) +{ + test_harness(test, "pkey_siginfo"); +} -- cgit v1.2.3 From 5f8cf6475828b600ff6d000e580c961ac839cc61 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Mon, 27 Jul 2020 11:01:27 +1000 Subject: selftests/powerpc: Squash spurious errors due to device removal For drivers that don't have the error handling callbacks we implement recovery by removing the device and re-probing it. This causes the sysfs directory for the PCI device to be removed which causes the following spurious error to be printed when checking the PE state: Breaking 0005:03:00.0... ./eeh-basic.sh: line 13: can't open /sys/bus/pci/devices/0005:03:00.0/eeh_pe_state: no such file 0005:03:00.0, waited 0/60 0005:03:00.0, waited 1/60 0005:03:00.0, waited 2/60 0005:03:00.0, waited 3/60 0005:03:00.0, waited 4/60 0005:03:00.0, waited 5/60 0005:03:00.0, waited 6/60 0005:03:00.0, waited 7/60 0005:03:00.0, Recovered after 8 seconds We currently try to avoid this by checking if the PE state file exists before reading from it. This is however inherently racy so re-work the state checking so that we only read from the file once, and we squash any errors that occur while reading. 
Fixes: 85d86c8aa52e ("selftests/powerpc: Add basic EEH selftest") Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200727010127.23698-1-oohall@gmail.com --- tools/testing/selftests/powerpc/eeh/eeh-functions.sh | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh index f52ed92b53e7..00dc32c0ed75 100755 --- a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh +++ b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh @@ -5,12 +5,17 @@ pe_ok() { local dev="$1" local path="/sys/bus/pci/devices/$dev/eeh_pe_state" - if ! [ -e "$path" ] ; then + # if a driver doesn't support the error handling callbacks then the + # device is recovered by removing and re-probing it. This causes the + # sysfs directory to disappear so read the PE state once and squash + # any potential error messages + local eeh_state="$(cat $path 2>/dev/null)" + if [ -z "$eeh_state" ]; then return 1; fi - local fw_state="$(cut -d' ' -f1 < $path)" - local sw_state="$(cut -d' ' -f2 < $path)" + local fw_state="$(echo $eeh_state | cut -d' ' -f1)" + local sw_state="$(echo $eeh_state | cut -d' ' -f2)" # If EEH_PE_ISOLATED or EEH_PE_RECOVERING are set then the PE is in an # error state or being recovered. Either way, not ok. -- cgit v1.2.3 From c9938a9dac95be7650218cdd8e9d1f882e7b5691 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 24 Jul 2020 19:25:24 +1000 Subject: selftests/powerpc: Add test of stack expansion logic We have custom stack expansion checks that it turns out are extremely badly tested and contain bugs, surprise. So add some tests that exercise the code and capture the current boundary conditions. The signal test currently fails on 64-bit kernels because the 2048 byte allowance for the signal frame is too small, we will fix that in a subsequent patch. 
Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724092528.1578671-1-mpe@ellerman.id.au --- tools/testing/selftests/powerpc/mm/.gitignore | 2 + tools/testing/selftests/powerpc/mm/Makefile | 8 +- .../selftests/powerpc/mm/stack_expansion_ldst.c | 233 +++++++++++++++++++++ .../selftests/powerpc/mm/stack_expansion_signal.c | 118 +++++++++++ tools/testing/selftests/powerpc/pmu/lib.h | 1 + 5 files changed, 361 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c create mode 100644 tools/testing/selftests/powerpc/mm/stack_expansion_signal.c diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore index 0323892a8032..91c775c23c66 100644 --- a/tools/testing/selftests/powerpc/mm/.gitignore +++ b/tools/testing/selftests/powerpc/mm/.gitignore @@ -9,3 +9,5 @@ bad_accesses tlbie_test pkey_exec_prot pkey_siginfo +stack_expansion_ldst +stack_expansion_signal diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile index 740f7854548e..250ce172e0da 100644 --- a/tools/testing/selftests/powerpc/mm/Makefile +++ b/tools/testing/selftests/powerpc/mm/Makefile @@ -4,7 +4,8 @@ noarg: TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot segv_errors wild_bctr \ large_vm_fork_separation bad_accesses pkey_exec_prot \ - pkey_siginfo + pkey_siginfo stack_expansion_signal stack_expansion_ldst + TEST_GEN_PROGS_EXTENDED := tlbie_test TEST_GEN_FILES := tempfile @@ -19,6 +20,11 @@ $(OUTPUT)/bad_accesses: CFLAGS += -m64 $(OUTPUT)/pkey_exec_prot: CFLAGS += -m64 $(OUTPUT)/pkey_siginfo: CFLAGS += -m64 +$(OUTPUT)/stack_expansion_signal: ../utils.c ../pmu/lib.c + +$(OUTPUT)/stack_expansion_ldst: CFLAGS += -fno-stack-protector +$(OUTPUT)/stack_expansion_ldst: ../utils.c + $(OUTPUT)/tempfile: dd if=/dev/zero of=$@ bs=64k count=1 diff --git a/tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c b/tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c new file mode 100644 index 000000000000..0587e11437f5 --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Test that loads/stores expand the stack segment, or trigger a SEGV, in + * various conditions. + * + * Based on test code by Tom Lane. + */ + +#undef NDEBUG +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define _KB (1024) +#define _MB (1024 * 1024) + +volatile char *stack_top_ptr; +volatile unsigned long stack_top_sp; +volatile char c; + +enum access_type { + LOAD, + STORE, +}; + +/* + * Consume stack until the stack pointer is below @target_sp, then do an access + * (load or store) at offset @delta from either the base of the stack or the + * current stack pointer. + */ +__attribute__ ((noinline)) +int consume_stack(unsigned long target_sp, unsigned long stack_high, int delta, enum access_type type) +{ + unsigned long target; + char stack_cur; + + if ((unsigned long)&stack_cur > target_sp) + return consume_stack(target_sp, stack_high, delta, type); + else { + // We don't really need this, but without it GCC might not + // generate a recursive call above. 
+ stack_top_ptr = &stack_cur; + +#ifdef __powerpc__ + asm volatile ("mr %[sp], %%r1" : [sp] "=r" (stack_top_sp)); +#else + asm volatile ("mov %%rsp, %[sp]" : [sp] "=r" (stack_top_sp)); +#endif + + // Kludge, delta < 0 indicates relative to SP + if (delta < 0) + target = stack_top_sp + delta; + else + target = stack_high - delta + 1; + + volatile char *p = (char *)target; + + if (type == STORE) + *p = c; + else + c = *p; + + // Do something to prevent the stack frame being popped prior to + // our access above. + getpid(); + } + + return 0; +} + +static int search_proc_maps(char *needle, unsigned long *low, unsigned long *high) +{ + unsigned long start, end; + static char buf[4096]; + char name[128]; + FILE *f; + int rc; + + f = fopen("/proc/self/maps", "r"); + if (!f) { + perror("fopen"); + return -1; + } + + while (fgets(buf, sizeof(buf), f)) { + rc = sscanf(buf, "%lx-%lx %*c%*c%*c%*c %*x %*d:%*d %*d %127s\n", + &start, &end, name); + if (rc == 2) + continue; + + if (rc != 3) { + printf("sscanf errored\n"); + rc = -1; + break; + } + + if (strstr(name, needle)) { + *low = start; + *high = end - 1; + rc = 0; + break; + } + } + + fclose(f); + + return rc; +} + +int child(unsigned int stack_used, int delta, enum access_type type) +{ + unsigned long low, stack_high; + + assert(search_proc_maps("[stack]", &low, &stack_high) == 0); + + assert(consume_stack(stack_high - stack_used, stack_high, delta, type) == 0); + + printf("Access OK: %s delta %-7d used size 0x%06x stack high 0x%lx top_ptr %p top sp 0x%lx actual used 0x%lx\n", + type == LOAD ? "load" : "store", delta, stack_used, stack_high, + stack_top_ptr, stack_top_sp, stack_high - stack_top_sp + 1); + + return 0; +} + +static int test_one(unsigned int stack_used, int delta, enum access_type type) +{ + pid_t pid; + int rc; + + pid = fork(); + if (pid == 0) + exit(child(stack_used, delta, type)); + + assert(waitpid(pid, &rc, 0) != -1); + + if (WIFEXITED(rc) && WEXITSTATUS(rc) == 0) + return 0; + + // We don't expect a non-zero exit that's not a signal + assert(!WIFEXITED(rc)); + + printf("Faulted: %s delta %-7d used size 0x%06x signal %d\n", + type == LOAD ? "load" : "store", delta, stack_used, + WTERMSIG(rc)); + + return 1; +} + +// This is fairly arbitrary but is well below any of the targets below, +// so that the delta between the stack pointer and the target is large. 
+#define DEFAULT_SIZE (32 * _KB) + +static void test_one_type(enum access_type type, unsigned long page_size, unsigned long rlim_cur) +{ + assert(test_one(DEFAULT_SIZE, 512 * _KB, type) == 0); + + // powerpc has a special case to allow up to 1MB + assert(test_one(DEFAULT_SIZE, 1 * _MB, type) == 0); + +#ifdef __powerpc__ + // This fails on powerpc because it's > 1MB and is not a stdu & + // not close to r1 + assert(test_one(DEFAULT_SIZE, 1 * _MB + 8, type) != 0); +#else + assert(test_one(DEFAULT_SIZE, 1 * _MB + 8, type) == 0); +#endif + +#ifdef __powerpc__ + // Accessing way past the stack pointer is not allowed on powerpc + assert(test_one(DEFAULT_SIZE, rlim_cur, type) != 0); +#else + // We should be able to access anywhere within the rlimit + assert(test_one(DEFAULT_SIZE, rlim_cur, type) == 0); +#endif + + // But if we go past the rlimit it should fail + assert(test_one(DEFAULT_SIZE, rlim_cur + 1, type) != 0); + + // Above 1MB powerpc only allows accesses within 2048 bytes of + // r1 for accesses that aren't stdu + assert(test_one(1 * _MB + page_size - 128, -2048, type) == 0); +#ifdef __powerpc__ + assert(test_one(1 * _MB + page_size - 128, -2049, type) != 0); +#else + assert(test_one(1 * _MB + page_size - 128, -2049, type) == 0); +#endif + + // By consuming 2MB of stack we test the stdu case + assert(test_one(2 * _MB + page_size - 128, -2048, type) == 0); +} + +static int test(void) +{ + unsigned long page_size; + struct rlimit rlimit; + + page_size = getpagesize(); + getrlimit(RLIMIT_STACK, &rlimit); + printf("Stack rlimit is 0x%lx\n", rlimit.rlim_cur); + + printf("Testing loads ...\n"); + test_one_type(LOAD, page_size, rlimit.rlim_cur); + printf("Testing stores ...\n"); + test_one_type(STORE, page_size, rlimit.rlim_cur); + + printf("All OK\n"); + + return 0; +} + +#ifdef __powerpc__ +#include "utils.h" + +int main(void) +{ + return test_harness(test, "stack_expansion_ldst"); +} +#else +int main(void) +{ + return test(); +} +#endif diff --git a/tools/testing/selftests/powerpc/mm/stack_expansion_signal.c b/tools/testing/selftests/powerpc/mm/stack_expansion_signal.c new file mode 100644 index 000000000000..c8b32a29e274 --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/stack_expansion_signal.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Test that signal delivery is able to expand the stack segment without + * triggering a SEGV. + * + * Based on test code by Tom Lane. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "../pmu/lib.h" +#include "utils.h" + +#define _KB (1024) +#define _MB (1024 * 1024) + +static char *stack_base_ptr; +static char *stack_top_ptr; + +static volatile sig_atomic_t sig_occurred = 0; + +static void sigusr1_handler(int signal_arg) +{ + sig_occurred = 1; +} + +static int consume_stack(unsigned int stack_size, union pipe write_pipe) +{ + char stack_cur; + + if ((stack_base_ptr - &stack_cur) < stack_size) + return consume_stack(stack_size, write_pipe); + else { + stack_top_ptr = &stack_cur; + + FAIL_IF(notify_parent(write_pipe)); + + while (!sig_occurred) + barrier(); + } + + return 0; +} + +static int child(unsigned int stack_size, union pipe write_pipe) +{ + struct sigaction act; + char stack_base; + + act.sa_handler = sigusr1_handler; + sigemptyset(&act.sa_mask); + act.sa_flags = 0; + if (sigaction(SIGUSR1, &act, NULL) < 0) + err(1, "sigaction"); + + stack_base_ptr = (char *) (((size_t) &stack_base + 65535) & ~65535UL); + + FAIL_IF(consume_stack(stack_size, write_pipe)); + + printf("size 0x%06x: OK, stack base %p top %p (%zx used)\n", + stack_size, stack_base_ptr, stack_top_ptr, + stack_base_ptr - stack_top_ptr); + + return 0; +} + +static int test_one_size(unsigned int stack_size) +{ + union pipe read_pipe, write_pipe; + pid_t pid; + + FAIL_IF(pipe(read_pipe.fds) == -1); + FAIL_IF(pipe(write_pipe.fds) == -1); + + pid = fork(); + if (pid == 0) { + close(read_pipe.read_fd); + close(write_pipe.write_fd); + exit(child(stack_size, read_pipe)); + } + + close(read_pipe.write_fd); + close(write_pipe.read_fd); + FAIL_IF(sync_with_child(read_pipe, write_pipe)); + + kill(pid, SIGUSR1); + + FAIL_IF(wait_for_child(pid)); + + close(read_pipe.read_fd); + close(write_pipe.write_fd); + + return 0; +} + +int test(void) +{ + unsigned int i, size; + + // Test with used stack from 1MB - 64K to 1MB + 64K + // Increment by 64 to get more coverage of odd sizes + for (i = 0; i < (128 * _KB); i += 64) { + size = i + (1 * _MB) - (64 * _KB); + FAIL_IF(test_one_size(size)); + } + + return 0; +} + +int main(void) +{ + return test_harness(test, "stack_expansion_signal"); +} diff --git a/tools/testing/selftests/powerpc/pmu/lib.h b/tools/testing/selftests/powerpc/pmu/lib.h index fa12e7d0b4d3..bf1bec013bbb 100644 --- a/tools/testing/selftests/powerpc/pmu/lib.h +++ b/tools/testing/selftests/powerpc/pmu/lib.h @@ -6,6 +6,7 @@ #ifndef __SELFTESTS_POWERPC_PMU_LIB_H #define __SELFTESTS_POWERPC_PMU_LIB_H +#include #include #include #include -- cgit v1.2.3 From 63dee5df43a31f3844efabc58972f0a206ca4534 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 24 Jul 2020 19:25:25 +1000 Subject: powerpc: Allow 4224 bytes of stack expansion for the signal frame We have powerpc specific logic in our page fault handling to decide if an access to an unmapped address below the stack pointer should expand the stack VMA. The code was originally added in 2004 "ported from 2.4". The rough logic is that the stack is allowed to grow to 1MB with no extra checking. Over 1MB the access must be within 2048 bytes of the stack pointer, or be from a user instruction that updates the stack pointer. The 2048 byte allowance below the stack pointer is there to cover the 288 byte "red zone" as well as the "about 1.5kB" needed by the signal delivery code. Unfortunately since then the signal frame has expanded, and is now 4224 bytes on 64-bit kernels with transactional memory enabled. 
This means if a process has consumed more than 1MB of stack, and its stack pointer lies less than 4224 bytes from the next page boundary, signal delivery will fault when trying to expand the stack and the process will see a SEGV. The total size of the signal frame is the size of struct rt_sigframe (which includes the red zone) plus __SIGNAL_FRAMESIZE (128 bytes on 64-bit). The 2048 byte allowance was correct until 2008 as the signal frame was: struct rt_sigframe { struct ucontext uc; /* 0 1440 */ /* --- cacheline 11 boundary (1408 bytes) was 32 bytes ago --- */ long unsigned int _unused[2]; /* 1440 16 */ unsigned int tramp[6]; /* 1456 24 */ struct siginfo * pinfo; /* 1480 8 */ void * puc; /* 1488 8 */ struct siginfo info; /* 1496 128 */ /* --- cacheline 12 boundary (1536 bytes) was 88 bytes ago --- */ char abigap[288]; /* 1624 288 */ /* size: 1920, cachelines: 15, members: 7 */ /* padding: 8 */ }; 1920 + 128 = 2048 Then in commit ce48b2100785 ("powerpc: Add VSX context save/restore, ptrace and signal support") (Jul 2008) the signal frame expanded to 2304 bytes: struct rt_sigframe { struct ucontext uc; /* 0 1696 */ <-- /* --- cacheline 13 boundary (1664 bytes) was 32 bytes ago --- */ long unsigned int _unused[2]; /* 1696 16 */ unsigned int tramp[6]; /* 1712 24 */ struct siginfo * pinfo; /* 1736 8 */ void * puc; /* 1744 8 */ struct siginfo info; /* 1752 128 */ /* --- cacheline 14 boundary (1792 bytes) was 88 bytes ago --- */ char abigap[288]; /* 1880 288 */ /* size: 2176, cachelines: 17, members: 7 */ /* padding: 8 */ }; 2176 + 128 = 2304 At this point we should have been exposed to the bug, though as far as I know it was never reported. I no longer have a system old enough to easily test on. Then in 2010 commit 320b2b8de126 ("mm: keep a guard page below a grow-down stack segment") caused our stack expansion code to never trigger, as there was always a VMA found for a write up to PAGE_SIZE below r1. 
That meant the bug was hidden as we continued to expand the signal frame in commit 2b0a576d15e0 ("powerpc: Add new transactional memory state to the signal context") (Feb 2013): struct rt_sigframe { struct ucontext uc; /* 0 1696 */ /* --- cacheline 13 boundary (1664 bytes) was 32 bytes ago --- */ struct ucontext uc_transact; /* 1696 1696 */ <-- /* --- cacheline 26 boundary (3328 bytes) was 64 bytes ago --- */ long unsigned int _unused[2]; /* 3392 16 */ unsigned int tramp[6]; /* 3408 24 */ struct siginfo * pinfo; /* 3432 8 */ void * puc; /* 3440 8 */ struct siginfo info; /* 3448 128 */ /* --- cacheline 27 boundary (3456 bytes) was 120 bytes ago --- */ char abigap[288]; /* 3576 288 */ /* size: 3872, cachelines: 31, members: 8 */ /* padding: 8 */ /* last cacheline: 32 bytes */ }; 3872 + 128 = 4000 And commit 573ebfa6601f ("powerpc: Increase stack redzone for 64-bit userspace to 512 bytes") (Feb 2014): struct rt_sigframe { struct ucontext uc; /* 0 1696 */ /* --- cacheline 13 boundary (1664 bytes) was 32 bytes ago --- */ struct ucontext uc_transact; /* 1696 1696 */ /* --- cacheline 26 boundary (3328 bytes) was 64 bytes ago --- */ long unsigned int _unused[2]; /* 3392 16 */ unsigned int tramp[6]; /* 3408 24 */ struct siginfo * pinfo; /* 3432 8 */ void * puc; /* 3440 8 */ struct siginfo info; /* 3448 128 */ /* --- cacheline 27 boundary (3456 bytes) was 120 bytes ago --- */ char abigap[512]; /* 3576 512 */ <-- /* size: 4096, cachelines: 32, members: 8 */ /* padding: 8 */ }; 4096 + 128 = 4224 Then finally in 2017, commit 1be7107fbe18 ("mm: larger stack guard gap, between vmas") exposed us to the existing bug, because it changed the stack VMA to be the correct/real size, meaning our stack expansion code is now triggered. Fix it by increasing the allowance to 4224 bytes. Hard-coding 4224 is obviously unsafe against future expansions of the signal frame in the same way as the existing code. We can't easily use sizeof() because the signal frame structure is not in a header. We will either fix that, or rip out all the custom stack expansion checking logic entirely. Fixes: ce48b2100785 ("powerpc: Add VSX context save/restore, ptrace and signal support") Cc: stable@vger.kernel.org # v2.6.27+ Reported-by: Tom Lane Tested-by: Daniel Axtens Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724092528.1578671-2-mpe@ellerman.id.au --- arch/powerpc/mm/fault.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 641fc5f3d7dd..3ebb1792e636 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -267,6 +267,9 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code, return false; } +// This comes from 64-bit struct rt_sigframe + __SIGNAL_FRAMESIZE +#define SIGFRAME_MAX_SIZE (4096 + 128) + static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, struct vm_area_struct *vma, unsigned int flags, bool *must_retry) @@ -274,7 +277,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, /* * N.B. The POWER/Open ABI allows programs to access up to * 288 bytes below the stack pointer. - * The kernel signal delivery code writes up to about 1.5kB + * The kernel signal delivery code writes a bit over 4KB * below the stack pointer (r1) before decrementing it. * The exec code can write slightly over 640kB to the stack * before setting the user r1. 
Thus we allow the stack to @@ -299,7 +302,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, * between the last mapped region and the stack will * expand the stack rather than segfaulting. */ - if (address + 2048 >= uregs->gpr[1]) + if (address + SIGFRAME_MAX_SIZE >= uregs->gpr[1]) return false; if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) && -- cgit v1.2.3 From 9ee571d84bf8cfdd587a1acbf3490ca90fc40c9d Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 24 Jul 2020 19:25:26 +1000 Subject: selftests/powerpc: Update the stack expansion test Update the stack expansion load/store test to take into account the new allowance of 4224 bytes below the stack pointer. Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724092528.1578671-3-mpe@ellerman.id.au --- tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c b/tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c index 0587e11437f5..8dbfb51acf0f 100644 --- a/tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c +++ b/tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c @@ -186,17 +186,17 @@ static void test_one_type(enum access_type type, unsigned long page_size, unsign // But if we go past the rlimit it should fail assert(test_one(DEFAULT_SIZE, rlim_cur + 1, type) != 0); - // Above 1MB powerpc only allows accesses within 2048 bytes of + // Above 1MB powerpc only allows accesses within 4224 bytes of // r1 for accesses that aren't stdu - assert(test_one(1 * _MB + page_size - 128, -2048, type) == 0); + assert(test_one(1 * _MB + page_size - 128, -4224, type) == 0); #ifdef __powerpc__ - assert(test_one(1 * _MB + page_size - 128, -2049, type) != 0); + assert(test_one(1 * _MB + page_size - 128, -4225, type) != 0); #else - assert(test_one(1 * _MB + page_size - 128, -2049, type) == 0); + assert(test_one(1 * _MB + page_size - 128, -4225, type) == 0); #endif // By consuming 2MB of stack we test the stdu case - assert(test_one(2 * _MB + page_size - 128, -2048, type) == 0); + assert(test_one(2 * _MB + page_size - 128, -4224, type) == 0); } static int test(void) -- cgit v1.2.3 From 773b3e53df5b84e73bf64998e4019f50a6662ad1 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 24 Jul 2020 19:25:27 +1000 Subject: powerpc/mm: Remove custom stack expansion checking We have powerpc specific logic in our page fault handling to decide if an access to an unmapped address below the stack pointer should expand the stack VMA. The logic aims to prevent userspace from doing bad accesses below the stack pointer. However as long as the stack is < 1MB in size, we allow all accesses without further checks. Adding some debug I see that I can do a full kernel build and LTP run, and not a single process has used more than 1MB of stack. So for the majority of processes the logic never even fires. We also recently found a nasty bug in this code which could cause userspace programs to be killed during signal delivery. It went unnoticed presumably because most processes use < 1MB of stack. The generic mm code has also grown support for stack guard pages since this code was originally written, so the most heinous case of the stack expanding into other mappings is now handled for us. 
Finally although some other arches have special logic in this path, from what I can tell none of x86, arm64, arm and s390 impose any extra checks other than those in expand_stack(). So drop our complicated logic and like other architectures just let the stack expand as long as its within the rlimit. Signed-off-by: Michael Ellerman Tested-by: Daniel Axtens Link: https://lore.kernel.org/r/20200724092528.1578671-4-mpe@ellerman.id.au --- arch/powerpc/mm/fault.c | 109 +++--------------------------------------------- 1 file changed, 5 insertions(+), 104 deletions(-) diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 3ebb1792e636..925a7231abb3 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -42,39 +42,7 @@ #include #include -/* - * Check whether the instruction inst is a store using - * an update addressing form which will update r1. - */ -static bool store_updates_sp(struct ppc_inst inst) -{ - /* check for 1 in the rA field */ - if (((ppc_inst_val(inst) >> 16) & 0x1f) != 1) - return false; - /* check major opcode */ - switch (ppc_inst_primary_opcode(inst)) { - case OP_STWU: - case OP_STBU: - case OP_STHU: - case OP_STFSU: - case OP_STFDU: - return true; - case OP_STD: /* std or stdu */ - return (ppc_inst_val(inst) & 3) == 1; - case OP_31: - /* check minor opcode */ - switch ((ppc_inst_val(inst) >> 1) & 0x3ff) { - case OP_31_XOP_STDUX: - case OP_31_XOP_STWUX: - case OP_31_XOP_STBUX: - case OP_31_XOP_STHUX: - case OP_31_XOP_STFSUX: - case OP_31_XOP_STFDUX: - return true; - } - } - return false; -} + /* * do_page_fault error handling helpers */ @@ -267,57 +235,6 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code, return false; } -// This comes from 64-bit struct rt_sigframe + __SIGNAL_FRAMESIZE -#define SIGFRAME_MAX_SIZE (4096 + 128) - -static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, - struct vm_area_struct *vma, unsigned int flags, - bool *must_retry) -{ - /* - * N.B. The POWER/Open ABI allows programs to access up to - * 288 bytes below the stack pointer. - * The kernel signal delivery code writes a bit over 4KB - * below the stack pointer (r1) before decrementing it. - * The exec code can write slightly over 640kB to the stack - * before setting the user r1. Thus we allow the stack to - * expand to 1MB without further checks. - */ - if (address + 0x100000 < vma->vm_end) { - struct ppc_inst __user *nip = (struct ppc_inst __user *)regs->nip; - /* get user regs even if this fault is in kernel mode */ - struct pt_regs *uregs = current->thread.regs; - if (uregs == NULL) - return true; - - /* - * A user-mode access to an address a long way below - * the stack pointer is only valid if the instruction - * is one which would update the stack pointer to the - * address accessed if the instruction completed, - * i.e. either stwu rs,n(r1) or stwux rs,r1,rb - * (or the byte, halfword, float or double forms). - * - * If we don't check this then any write to the area - * between the last mapped region and the stack will - * expand the stack rather than segfaulting. 
- */ - if (address + SIGFRAME_MAX_SIZE >= uregs->gpr[1]) - return false; - - if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) && - access_ok(nip, sizeof(*nip))) { - struct ppc_inst inst; - - if (!probe_user_read_inst(&inst, nip)) - return !store_updates_sp(inst); - *must_retry = true; - } - return true; - } - return false; -} - #ifdef CONFIG_PPC_MEM_KEYS static bool access_pkey_error(bool is_write, bool is_exec, bool is_pkey, struct vm_area_struct *vma) @@ -483,7 +400,6 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address, int is_user = user_mode(regs); int is_write = page_fault_is_write(error_code); vm_fault_t fault, major = 0; - bool must_retry = false; bool kprobe_fault = kprobe_page_fault(regs, 11); if (unlikely(debugger_fault_handler(regs) || kprobe_fault)) @@ -572,30 +488,15 @@ retry: vma = find_vma(mm, address); if (unlikely(!vma)) return bad_area(regs, address); - if (likely(vma->vm_start <= address)) - goto good_area; - if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) - return bad_area(regs, address); - /* The stack is being expanded, check if it's valid */ - if (unlikely(bad_stack_expansion(regs, address, vma, flags, - &must_retry))) { - if (!must_retry) + if (unlikely(vma->vm_start > address)) { + if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) return bad_area(regs, address); - mmap_read_unlock(mm); - if (fault_in_pages_readable((const char __user *)regs->nip, - sizeof(unsigned int))) - return bad_area_nosemaphore(regs, address); - goto retry; + if (unlikely(expand_stack(vma, address))) + return bad_area(regs, address); } - /* Try to expand it */ - if (unlikely(expand_stack(vma, address))) - return bad_area(regs, address); - -good_area: - #ifdef CONFIG_PPC_MEM_KEYS if (unlikely(access_pkey_error(is_write, is_exec, (error_code & DSISR_KEYFAULT), vma))) -- cgit v1.2.3 From 73da08f6966b81feb429af4fb3229da4cf21d6d9 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 24 Jul 2020 19:25:28 +1000 Subject: selftests/powerpc: Remove powerpc special cases from stack expansion test Now that the powerpc code behaves the same as other architectures we can drop the special cases we had. 
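The invariant the simplified test relies on can be sketched in a few lines of userspace C. This is illustrative only and assumes the kernel change above is applied; the function name, offset and store value are invented here rather than taken from the selftest. The point it demonstrates: an ordinary store below the stack pointer succeeds whenever the resulting stack still fits within RLIMIT_STACK, with no powerpc-specific carve-outs.

#include <sys/resource.h>

/* Touch memory a fixed distance below the current stack frame. */
static int probe_below_sp(unsigned long offset)
{
	struct rlimit rlim;
	volatile char *p;

	/* Past the rlimit we expect SIGSEGV, not expansion, so don't try. */
	if (getrlimit(RLIMIT_STACK, &rlim) != 0 || offset >= rlim.rlim_cur)
		return -1;

	p = (volatile char *)((unsigned long)&rlim - offset);
	*p = 0x5a;	/* fault here should just expand the stack VMA */
	return 0;
}

int main(void)
{
	/* 64 KiB below the frame: well within a default 8 MiB stack rlimit. */
	return probe_below_sp(64 * 1024) ? 1 : 0;
}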
Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724092528.1578671-5-mpe@ellerman.id.au --- .../selftests/powerpc/mm/stack_expansion_ldst.c | 41 +++------------------- 1 file changed, 5 insertions(+), 36 deletions(-) diff --git a/tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c b/tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c index 8dbfb51acf0f..ed9143990888 100644 --- a/tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c +++ b/tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c @@ -56,13 +56,7 @@ int consume_stack(unsigned long target_sp, unsigned long stack_high, int delta, #else asm volatile ("mov %%rsp, %[sp]" : [sp] "=r" (stack_top_sp)); #endif - - // Kludge, delta < 0 indicates relative to SP - if (delta < 0) - target = stack_top_sp + delta; - else - target = stack_high - delta + 1; - + target = stack_high - delta + 1; volatile char *p = (char *)target; if (type == STORE) @@ -162,41 +156,16 @@ static int test_one(unsigned int stack_used, int delta, enum access_type type) static void test_one_type(enum access_type type, unsigned long page_size, unsigned long rlim_cur) { - assert(test_one(DEFAULT_SIZE, 512 * _KB, type) == 0); + unsigned long delta; - // powerpc has a special case to allow up to 1MB - assert(test_one(DEFAULT_SIZE, 1 * _MB, type) == 0); - -#ifdef __powerpc__ - // This fails on powerpc because it's > 1MB and is not a stdu & - // not close to r1 - assert(test_one(DEFAULT_SIZE, 1 * _MB + 8, type) != 0); -#else - assert(test_one(DEFAULT_SIZE, 1 * _MB + 8, type) == 0); -#endif - -#ifdef __powerpc__ - // Accessing way past the stack pointer is not allowed on powerpc - assert(test_one(DEFAULT_SIZE, rlim_cur, type) != 0); -#else // We should be able to access anywhere within the rlimit + for (delta = page_size; delta <= rlim_cur; delta += page_size) + assert(test_one(DEFAULT_SIZE, delta, type) == 0); + assert(test_one(DEFAULT_SIZE, rlim_cur, type) == 0); -#endif // But if we go past the rlimit it should fail assert(test_one(DEFAULT_SIZE, rlim_cur + 1, type) != 0); - - // Above 1MB powerpc only allows accesses within 4224 bytes of - // r1 for accesses that aren't stdu - assert(test_one(1 * _MB + page_size - 128, -4224, type) == 0); -#ifdef __powerpc__ - assert(test_one(1 * _MB + page_size - 128, -4225, type) != 0); -#else - assert(test_one(1 * _MB + page_size - 128, -4225, type) == 0); -#endif - - // By consuming 2MB of stack we test the stdu case - assert(test_one(2 * _MB + page_size - 128, -4224, type) == 0); } static int test(void) -- cgit v1.2.3 From 55548a86ebde2b3691b6a84baef1b02933408994 Mon Sep 17 00:00:00 2001 From: Bharata B Rao Date: Mon, 27 Jul 2020 15:27:04 +0530 Subject: powerpc/mm: Limit resize_hpt_for_hotplug() call to hash guests only During memory hotplug and unplug, resize_hpt_for_hotplug() gets called for both hash and radix guests but it should be called only for hash guests. Though the call does nothing in the radix guest case, it is cleaner to push this call into hash specific memory hotplug routines. 
Reported-by: Nathan Lynch Signed-off-by: Bharata B Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200727095704.1432916-1-bharata@linux.ibm.com --- arch/powerpc/include/asm/sparsemem.h | 6 ------ arch/powerpc/mm/book3s64/hash_utils.c | 8 +++++++- arch/powerpc/mm/mem.c | 5 ----- 3 files changed, 7 insertions(+), 12 deletions(-) diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h index c89b32443cff..1e6fa371cc38 100644 --- a/arch/powerpc/include/asm/sparsemem.h +++ b/arch/powerpc/include/asm/sparsemem.h @@ -17,12 +17,6 @@ extern int create_section_mapping(unsigned long start, unsigned long end, int nid, pgprot_t prot); extern int remove_section_mapping(unsigned long start, unsigned long end); -#ifdef CONFIG_PPC_BOOK3S_64 -extern int resize_hpt_for_hotplug(unsigned long new_mem_size); -#else -static inline int resize_hpt_for_hotplug(unsigned long new_mem_size) { return 0; } -#endif - #ifdef CONFIG_NUMA extern int hot_add_scn_to_nid(unsigned long scn_addr); #else diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 9fdabea04990..30a4a91d9987 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -785,7 +785,7 @@ static unsigned long __init htab_get_table_size(void) } #ifdef CONFIG_MEMORY_HOTPLUG -int resize_hpt_for_hotplug(unsigned long new_mem_size) +static int resize_hpt_for_hotplug(unsigned long new_mem_size) { unsigned target_hpt_shift; @@ -819,6 +819,8 @@ int hash__create_section_mapping(unsigned long start, unsigned long end, return -1; } + resize_hpt_for_hotplug(memblock_phys_mem_size()); + rc = htab_bolt_mapping(start, end, __pa(start), pgprot_val(prot), mmu_linear_psize, mmu_kernel_ssize); @@ -836,6 +838,10 @@ int hash__remove_section_mapping(unsigned long start, unsigned long end) int rc = htab_remove_mapping(start, end, mmu_linear_psize, mmu_kernel_ssize); WARN_ON(rc < 0); + + if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC) + pr_warn("Hash collision while resizing HPT\n"); + return rc; } #endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index c2c11eb8dcfc..9dafc636588f 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -127,8 +127,6 @@ int __ref arch_add_memory(int nid, u64 start, u64 size, unsigned long nr_pages = size >> PAGE_SHIFT; int rc; - resize_hpt_for_hotplug(memblock_phys_mem_size()); - start = (unsigned long)__va(start); rc = create_section_mapping(start, start + size, nid, params->pgprot); @@ -161,9 +159,6 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size, * hit that section of memory */ vm_unmap_aliases(); - - if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC) - pr_warn("Hash collision while resizing HPT\n"); } #endif -- cgit v1.2.3 From fbb44c9a08ef994109947c5439e649b18ad509ac Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 24 Jul 2020 23:17:20 +1000 Subject: powerpc/configs: Drop old symbols from ppc6xx_defconfig ppc6xx_defconfig refers to quite a few symbols that no longer exist, as reported by scripts/checkkconfigsymbols.py, remove them. 
Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724131728.1643966-1-mpe@ellerman.id.au --- arch/powerpc/configs/ppc6xx_defconfig | 39 ----------------------------------- 1 file changed, 39 deletions(-) diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index feb5d47d8d1e..5e6f92ba3210 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -53,7 +53,6 @@ CONFIG_MPC836x_MDS=y CONFIG_MPC836x_RDK=y CONFIG_MPC837x_MDS=y CONFIG_MPC837x_RDB=y -CONFIG_SBC834x=y CONFIG_ASP834x=y CONFIG_PPC_86xx=y CONFIG_MPC8641_HPCN=y @@ -187,7 +186,6 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -203,7 +201,6 @@ CONFIG_IP_NF_SECURITY=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -241,7 +238,6 @@ CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m CONFIG_BRIDGE_EBT_NFLOG=m CONFIG_IP_DCCP=m -CONFIG_NET_DCCPPROBE=m CONFIG_TIPC=m CONFIG_ATM=m CONFIG_ATM_CLIP=m @@ -251,7 +247,6 @@ CONFIG_BRIDGE=m CONFIG_VLAN_8021Q=m CONFIG_DECNET=m CONFIG_DECNET_ROUTER=y -CONFIG_IPX=m CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m @@ -297,26 +292,6 @@ CONFIG_NET_ACT_NAT=m CONFIG_NET_ACT_PEDIT=m CONFIG_NET_ACT_SIMP=m CONFIG_NET_ACT_SKBEDIT=m -CONFIG_IRDA=m -CONFIG_IRLAN=m -CONFIG_IRNET=m -CONFIG_IRCOMM=m -CONFIG_IRDA_CACHE_LAST_LSAP=y -CONFIG_IRDA_FAST_RR=y -CONFIG_IRTTY_SIR=m -CONFIG_KINGSUN_DONGLE=m -CONFIG_KSDAZZLE_DONGLE=m -CONFIG_KS959_DONGLE=m -CONFIG_USB_IRDA=m -CONFIG_SIGMATEL_FIR=m -CONFIG_NSC_FIR=m -CONFIG_WINBOND_FIR=m -CONFIG_TOSHIBA_FIR=m -CONFIG_SMC_IRCC_FIR=m -CONFIG_ALI_FIR=m -CONFIG_VLSI_FIR=m -CONFIG_VIA_FIR=m -CONFIG_MCS_FIR=m CONFIG_BT=m CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y @@ -332,7 +307,6 @@ CONFIG_BT_HCIBFUSB=m CONFIG_BT_HCIDTL1=m CONFIG_BT_HCIBT3C=m CONFIG_BT_HCIBLUECARD=m -CONFIG_BT_HCIBTUART=m CONFIG_BT_HCIVHCI=m CONFIG_CFG80211=m CONFIG_MAC80211=m @@ -366,7 +340,6 @@ CONFIG_EEPROM_93CX6=m CONFIG_RAID_ATTRS=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m -CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=m CONFIG_CHR_DEV_SG=y CONFIG_CHR_DEV_SCH=m @@ -663,7 +636,6 @@ CONFIG_I2C_MPC=m CONFIG_I2C_PCA_PLATFORM=m CONFIG_I2C_SIMTEC=m CONFIG_I2C_PARPORT=m -CONFIG_I2C_PARPORT_LIGHT=m CONFIG_I2C_TINY_USB=m CONFIG_I2C_PCA_ISA=m CONFIG_I2C_STUB=m @@ -676,7 +648,6 @@ CONFIG_W1_SLAVE_THERM=m CONFIG_W1_SLAVE_SMEM=m CONFIG_W1_SLAVE_DS2433=m CONFIG_W1_SLAVE_DS2433_CRC=y -CONFIG_W1_SLAVE_DS2760=m CONFIG_APM_POWER=m CONFIG_BATTERY_PMU=m CONFIG_HWMON=m @@ -1065,15 +1036,6 @@ CONFIG_CIFS_UPCALL=y CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y CONFIG_CIFS_DFS_UPCALL=y -CONFIG_NCP_FS=m -CONFIG_NCPFS_PACKET_SIGNING=y -CONFIG_NCPFS_IOCTL_LOCKING=y -CONFIG_NCPFS_STRONG=y -CONFIG_NCPFS_NFS_NS=y -CONFIG_NCPFS_OS2_NS=y -CONFIG_NCPFS_SMALLDOS=y -CONFIG_NCPFS_NLS=y -CONFIG_NCPFS_EXTRAS=y CONFIG_CODA_FS=m CONFIG_9P_FS=m CONFIG_NLS_DEFAULT="utf8" @@ -1117,7 +1079,6 @@ CONFIG_NLS_KOI8_U=m CONFIG_DEBUG_INFO=y CONFIG_UNUSED_SYMBOLS=y CONFIG_HEADERS_INSTALL=y -CONFIG_HEADERS_CHECK=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_OBJECTS=y -- cgit v1.2.3 From 0fcce25b7743d634cc1ddce83382f51333933f76 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 24 Jul 2020 23:17:21 +1000 Subject: powerpc/configs: Remove dead 
symbols Remove references to symbols that no longer exist as reported by scripts/checkkconfigsymbols.py. Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724131728.1643966-2-mpe@ellerman.id.au --- arch/powerpc/configs/44x/akebono_defconfig | 1 - arch/powerpc/configs/85xx/xes_mpc85xx_defconfig | 3 --- arch/powerpc/configs/86xx-hw.config | 2 -- arch/powerpc/configs/fsl-emb-nonhw.config | 1 - arch/powerpc/configs/g5_defconfig | 1 - arch/powerpc/configs/linkstation_defconfig | 1 - arch/powerpc/configs/mpc512x_defconfig | 1 - arch/powerpc/configs/mpc83xx_defconfig | 1 - arch/powerpc/configs/mvme5100_defconfig | 1 - arch/powerpc/configs/pasemi_defconfig | 1 - arch/powerpc/configs/pmac32_defconfig | 8 -------- arch/powerpc/configs/powernv_defconfig | 1 - arch/powerpc/configs/ppc40x_defconfig | 3 --- arch/powerpc/configs/ppc64_defconfig | 1 - arch/powerpc/configs/pseries_defconfig | 1 - 15 files changed, 27 deletions(-) diff --git a/arch/powerpc/configs/44x/akebono_defconfig b/arch/powerpc/configs/44x/akebono_defconfig index 60d5fa2c3b93..3894ba8f8ffc 100644 --- a/arch/powerpc/configs/44x/akebono_defconfig +++ b/arch/powerpc/configs/44x/akebono_defconfig @@ -56,7 +56,6 @@ CONFIG_BLK_DEV_SD=y # CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_EMULEX is not set -# CONFIG_NET_VENDOR_EXAR is not set CONFIG_IBM_EMAC=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MELLANOX is not set diff --git a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig index d50aca608736..3a6381aa9fdc 100644 --- a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig +++ b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig @@ -51,9 +51,6 @@ CONFIG_NET_IPIP=y CONFIG_IP_MROUTE=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y -# CONFIG_INET_XFRM_MODE_TRANSPORT is not set -# CONFIG_INET_XFRM_MODE_TUNNEL is not set -# CONFIG_INET_XFRM_MODE_BEET is not set CONFIG_MTD=y CONFIG_MTD_REDBOOT_PARTS=y CONFIG_MTD_CMDLINE_PARTS=y diff --git a/arch/powerpc/configs/86xx-hw.config b/arch/powerpc/configs/86xx-hw.config index 151164cf8cb3..0cb24b33c88e 100644 --- a/arch/powerpc/configs/86xx-hw.config +++ b/arch/powerpc/configs/86xx-hw.config @@ -32,8 +32,6 @@ CONFIG_HW_RANDOM=y CONFIG_HZ_1000=y CONFIG_I2C_MPC=y CONFIG_I2C=y -# CONFIG_INET_XFRM_MODE_TRANSPORT is not set -# CONFIG_INET_XFRM_MODE_TUNNEL is not set CONFIG_INPUT_FF_MEMLESS=m # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSEDEV is not set diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config index 3c7dad19a691..df37efed0aec 100644 --- a/arch/powerpc/configs/fsl-emb-nonhw.config +++ b/arch/powerpc/configs/fsl-emb-nonhw.config @@ -56,7 +56,6 @@ CONFIG_IKCONFIG=y CONFIG_INET_AH=y CONFIG_INET_ESP=y CONFIG_INET_IPCOMP=y -# CONFIG_INET_XFRM_MODE_BEET is not set CONFIG_INET=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_MROUTE=y diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index a68c7f3af10e..1c674c4c1d86 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig @@ -51,7 +51,6 @@ CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_IRC=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_BLK_DEV_LOOP=y diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig index ea59f3d146df..d4be64f190ff 100644 --- a/arch/powerpc/configs/linkstation_defconfig +++ 
b/arch/powerpc/configs/linkstation_defconfig @@ -37,7 +37,6 @@ CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NETFILTER_XT_MATCH_MAC=m CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig index e39346b3dc3b..e75d3f3060c9 100644 --- a/arch/powerpc/configs/mpc512x_defconfig +++ b/arch/powerpc/configs/mpc512x_defconfig @@ -47,7 +47,6 @@ CONFIG_MTD_UBI=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=1 CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_BLK_DEV_RAM_DAX=y CONFIG_EEPROM_AT24=y CONFIG_EEPROM_AT25=y CONFIG_SCSI=y diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig index be125729635c..95d43f8a3869 100644 --- a/arch/powerpc/configs/mpc83xx_defconfig +++ b/arch/powerpc/configs/mpc83xx_defconfig @@ -19,7 +19,6 @@ CONFIG_MPC836x_MDS=y CONFIG_MPC836x_RDK=y CONFIG_MPC837x_MDS=y CONFIG_MPC837x_RDB=y -CONFIG_SBC834x=y CONFIG_ASP834x=y CONFIG_QE_GPIO=y CONFIG_MATH_EMULATION=y diff --git a/arch/powerpc/configs/mvme5100_defconfig b/arch/powerpc/configs/mvme5100_defconfig index 3d53d69ed36c..1fed6be95d53 100644 --- a/arch/powerpc/configs/mvme5100_defconfig +++ b/arch/powerpc/configs/mvme5100_defconfig @@ -45,7 +45,6 @@ CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NETFILTER_XT_MATCH_MAC=m CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index 08b7f4cef243..af9af03059e4 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig @@ -58,7 +58,6 @@ CONFIG_BLK_DEV_RAM_SIZE=16384 CONFIG_EEPROM_LEGACY=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y -CONFIG_CHR_DEV_OSST=y CONFIG_BLK_DEV_SR=y CONFIG_CHR_DEV_SG=y CONFIG_CHR_DEV_SCH=y diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig index 05e325ca3fbd..665a8d7cded0 100644 --- a/arch/powerpc/configs/pmac32_defconfig +++ b/arch/powerpc/configs/pmac32_defconfig @@ -75,7 +75,6 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -90,13 +89,6 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_IP_DCCP=m -CONFIG_IRDA=m -CONFIG_IRLAN=m -CONFIG_IRNET=m -CONFIG_IRCOMM=m -CONFIG_IRDA_CACHE_LAST_LSAP=y -CONFIG_IRDA_FAST_RR=y -CONFIG_IRTTY_SIR=m CONFIG_BT=m CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig index afc0dd73a1e6..cf30fc24413b 100644 --- a/arch/powerpc/configs/powernv_defconfig +++ b/arch/powerpc/configs/powernv_defconfig @@ -245,7 +245,6 @@ CONFIG_INFINIBAND=m CONFIG_INFINIBAND_USER_MAD=m CONFIG_INFINIBAND_USER_ACCESS=m CONFIG_INFINIBAND_MTHCA=m -CONFIG_INFINIBAND_CXGB3=m CONFIG_INFINIBAND_CXGB4=m CONFIG_MLX4_INFINIBAND=m CONFIG_INFINIBAND_IPOIB=m diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig index 25f6c91e843a..7e48693775f4 100644 --- a/arch/powerpc/configs/ppc40x_defconfig +++ b/arch/powerpc/configs/ppc40x_defconfig @@ -20,9 +20,6 @@ CONFIG_INET=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y -# 
CONFIG_INET_XFRM_MODE_TRANSPORT is not set -# CONFIG_INET_XFRM_MODE_TUNNEL is not set -# CONFIG_INET_XFRM_MODE_BEET is not set CONFIG_CONNECTOR=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 8d7e3e98856d..48759656a067 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -268,7 +268,6 @@ CONFIG_INFINIBAND=m CONFIG_INFINIBAND_USER_MAD=m CONFIG_INFINIBAND_USER_ACCESS=m CONFIG_INFINIBAND_MTHCA=m -CONFIG_INFINIBAND_CXGB3=m CONFIG_INFINIBAND_CXGB4=m CONFIG_MLX4_INFINIBAND=m CONFIG_INFINIBAND_IPOIB=m diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 894e8d85fb48..efd5398928d5 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -223,7 +223,6 @@ CONFIG_INFINIBAND=m CONFIG_INFINIBAND_USER_MAD=m CONFIG_INFINIBAND_USER_ACCESS=m CONFIG_INFINIBAND_MTHCA=m -CONFIG_INFINIBAND_CXGB3=m CONFIG_INFINIBAND_CXGB4=m CONFIG_MLX4_INFINIBAND=m CONFIG_INFINIBAND_IPOIB=m -- cgit v1.2.3 From 8cdcde5f76a42d53a50d1fc9e1fbfc9b90102323 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 24 Jul 2020 23:17:22 +1000 Subject: powerpc/52xx: Fix comment about CONFIG_BDI* There's a comment in lite5200_sleep.S that refers to "CONFIG_BDI*". This confuses scripts/checkkconfigsymbols.py, which thinks it should be able to find CONFIG_BDI. Change the comment to refer to CONFIG_BDI_SWITCH which is presumably roughly what it was referring to. AFAICS there never has been a CONFIG_BDI. Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724131728.1643966-3-mpe@ellerman.id.au --- arch/powerpc/platforms/52xx/lite5200_sleep.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S index 70083649c9ea..11475c58ea43 100644 --- a/arch/powerpc/platforms/52xx/lite5200_sleep.S +++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S @@ -56,7 +56,7 @@ lite5200_low_power: /* * save stuff BDI overwrites * 0xf0 (0xe0->0x100 gets overwritten when BDI connected; - * even when CONFIG_BDI* is disabled and MMU XLAT commented; heisenbug?)) + * even when CONFIG_BDI_SWITCH is disabled and MMU XLAT commented; heisenbug?)) * WARNING: self-refresh doesn't seem to work when BDI2000 is connected, * possibly because BDI sets SDRAM registers before wakeup code does */ -- cgit v1.2.3 From 07e571ea59eef518730f983f4203651ea413f2cf Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 24 Jul 2020 23:17:23 +1000 Subject: powerpc/64e: Drop dead BOOK3E_MMU_TLB_STATS code This code was merged 11 years ago in commit 13363ab9b9d0 ("powerpc: Add definitions used by exception handling on 64-bit Book3E") but was never able to be built because CONFIG_BOOK3E_MMU_TLB_STATS never existed. Remove it. 
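As background on how code like this lingers: a block guarded by a Kconfig symbol that no Kconfig file defines is never emitted into include/generated/autoconf.h, so the preprocessor drops it and nothing in the build ever complains. A minimal, hypothetical illustration (only the symbol name is real; the guarded helper is invented):

/*
 * Hypothetical sketch: CONFIG_BOOK3E_MMU_TLB_STATS appears in no Kconfig
 * file, so autoconf.h never defines it, the #ifdef is always false, and
 * this helper is silently discarded -- no warning, no object code.
 */
#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
static inline void tlb_stats_account(unsigned long *counter)
{
	(*counter)++;
}
#endif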
Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724131728.1643966-4-mpe@ellerman.id.au --- arch/powerpc/include/asm/exception-64e.h | 53 +------------------------------- arch/powerpc/mm/nohash/tlb_low_64e.S | 47 ++-------------------------- 2 files changed, 4 insertions(+), 96 deletions(-) diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h index 72b6657acd2d..40cdcb2fb057 100644 --- a/arch/powerpc/include/asm/exception-64e.h +++ b/arch/powerpc/include/asm/exception-64e.h @@ -66,14 +66,7 @@ #define EX_TLB_SRR0 (10 * 8) #define EX_TLB_SRR1 (11 * 8) #define EX_TLB_R7 (12 * 8) -#ifdef CONFIG_BOOK3E_MMU_TLB_STATS -#define EX_TLB_R8 (13 * 8) -#define EX_TLB_R9 (14 * 8) -#define EX_TLB_LR (15 * 8) -#define EX_TLB_SIZE (16 * 8) -#else #define EX_TLB_SIZE (13 * 8) -#endif #define START_EXCEPTION(label) \ .globl exc_##label##_book3e; \ @@ -110,8 +103,7 @@ exc_##label##_book3e: std r11,EX_TLB_R12(r12); \ mtspr SPRN_SPRG_TLB_EXFRAME,r14; \ std r15,EX_TLB_SRR1(r12); \ - std r16,EX_TLB_SRR0(r12); \ - TLB_MISS_PROLOG_STATS + std r16,EX_TLB_SRR0(r12); /* And these are the matching epilogs that restores things * @@ -143,7 +135,6 @@ exc_##label##_book3e: mtspr SPRN_SRR0,r15; \ ld r15,EX_TLB_R15(r12); \ mtspr SPRN_SRR1,r16; \ - TLB_MISS_RESTORE_STATS \ ld r16,EX_TLB_R16(r12); \ ld r12,EX_TLB_R12(r12); \ @@ -158,48 +149,6 @@ exc_##label##_book3e: addi r11,r13,PACA_EXTLB; \ TLB_MISS_RESTORE(r11) -#ifdef CONFIG_BOOK3E_MMU_TLB_STATS -#define TLB_MISS_PROLOG_STATS \ - mflr r10; \ - std r8,EX_TLB_R8(r12); \ - std r9,EX_TLB_R9(r12); \ - std r10,EX_TLB_LR(r12); -#define TLB_MISS_RESTORE_STATS \ - ld r16,EX_TLB_LR(r12); \ - ld r9,EX_TLB_R9(r12); \ - ld r8,EX_TLB_R8(r12); \ - mtlr r16; -#define TLB_MISS_STATS_D(name) \ - addi r9,r13,MMSTAT_DSTATS+name; \ - bl tlb_stat_inc; -#define TLB_MISS_STATS_I(name) \ - addi r9,r13,MMSTAT_ISTATS+name; \ - bl tlb_stat_inc; -#define TLB_MISS_STATS_X(name) \ - ld r8,PACA_EXTLB+EX_TLB_ESR(r13); \ - cmpdi cr2,r8,-1; \ - beq cr2,61f; \ - addi r9,r13,MMSTAT_DSTATS+name; \ - b 62f; \ -61: addi r9,r13,MMSTAT_ISTATS+name; \ -62: bl tlb_stat_inc; -#define TLB_MISS_STATS_SAVE_INFO \ - std r14,EX_TLB_ESR(r12); /* save ESR */ -#define TLB_MISS_STATS_SAVE_INFO_BOLTED \ - std r14,PACA_EXTLB+EX_TLB_ESR(r13); /* save ESR */ -#else -#define TLB_MISS_PROLOG_STATS -#define TLB_MISS_RESTORE_STATS -#define TLB_MISS_PROLOG_STATS_BOLTED -#define TLB_MISS_RESTORE_STATS_BOLTED -#define TLB_MISS_STATS_D(name) -#define TLB_MISS_STATS_I(name) -#define TLB_MISS_STATS_X(name) -#define TLB_MISS_STATS_Y(name) -#define TLB_MISS_STATS_SAVE_INFO -#define TLB_MISS_STATS_SAVE_INFO_BOLTED -#endif - #define SET_IVOR(vector_number, vector_offset) \ LOAD_REG_ADDR(r3,interrupt_base_book3e);\ ori r3,r3,vector_offset@l; \ diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S index d5e2704d0096..bf24451f3e71 100644 --- a/arch/powerpc/mm/nohash/tlb_low_64e.S +++ b/arch/powerpc/mm/nohash/tlb_low_64e.S @@ -71,7 +71,6 @@ START_BTB_FLUSH_SECTION END_BTB_FLUSH_SECTION std r7,EX_TLB_R7(r12) #endif - TLB_MISS_PROLOG_STATS .endm .macro tlb_epilog_bolted @@ -85,7 +84,6 @@ END_BTB_FLUSH_SECTION mtcr r14 ld r14,EX_TLB_R14(r12) ld r15,EX_TLB_R15(r12) - TLB_MISS_RESTORE_STATS ld r16,EX_TLB_R16(r12) mfspr r12,SPRN_SPRG_GEN_SCRATCH .endm @@ -128,7 +126,6 @@ END_BTB_FLUSH_SECTION ori r10,r10,_PAGE_PRESENT oris r11,r10,_PAGE_ACCESSED@h - TLB_MISS_STATS_SAVE_INFO_BOLTED bne tlb_miss_kernel_bolted tlb_miss_common_bolted: @@ -209,7 +206,6 @@ 
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV) tlbwe tlb_miss_done_bolted: - TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK) tlb_epilog_bolted rfi @@ -229,11 +225,9 @@ tlb_miss_fault_bolted: andi. r10,r11,_PAGE_EXEC|_PAGE_BAP_SX bne itlb_miss_fault_bolted dtlb_miss_fault_bolted: - TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT) tlb_epilog_bolted b exc_data_storage_book3e itlb_miss_fault_bolted: - TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT) tlb_epilog_bolted b exc_instruction_storage_book3e @@ -243,7 +237,6 @@ itlb_miss_fault_bolted: rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4 srdi r15,r16,60 /* get region */ - TLB_MISS_STATS_SAVE_INFO_BOLTED bne- itlb_miss_fault_bolted li r11,_PAGE_PRESENT|_PAGE_EXEC /* Base perm */ @@ -276,7 +269,6 @@ itlb_miss_fault_bolted: srdi. r15,r16,60 /* get region */ ori r16,r16,1 - TLB_MISS_STATS_SAVE_INFO_BOLTED bne tlb_miss_kernel_e6500 /* user/kernel test */ b tlb_miss_common_e6500 @@ -288,7 +280,6 @@ itlb_miss_fault_bolted: srdi. r15,r16,60 /* get region */ rldicr r16,r16,0,62 - TLB_MISS_STATS_SAVE_INFO_BOLTED bne tlb_miss_kernel_e6500 /* user vs kernel check */ /* @@ -460,7 +451,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_SMT) .endm tlb_unlock_e6500 - TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK) tlb_epilog_bolted rfi @@ -519,11 +509,9 @@ tlb_miss_fault_e6500: andi. r16,r16,1 bne itlb_miss_fault_e6500 dtlb_miss_fault_e6500: - TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT) tlb_epilog_bolted b exc_data_storage_book3e itlb_miss_fault_e6500: - TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT) tlb_epilog_bolted b exc_instruction_storage_book3e #endif /* CONFIG_PPC_FSL_BOOK3E */ @@ -548,7 +536,6 @@ itlb_miss_fault_e6500: mfspr r16,SPRN_DEAR /* get faulting address */ srdi r15,r16,60 /* get region */ cmpldi cr0,r15,0xc /* linear mapping ? */ - TLB_MISS_STATS_SAVE_INFO beq tlb_load_linear /* yes -> go to linear map load */ /* The page tables are mapped virtually linear. At this point, though, @@ -600,7 +587,6 @@ itlb_miss_fault_e6500: /* We got a crappy address, just fault with whatever DEAR and ESR * are here */ - TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT) TLB_MISS_EPILOG_ERROR b exc_data_storage_book3e @@ -624,7 +610,6 @@ itlb_miss_fault_e6500: */ srdi r15,r16,60 /* get region */ cmpldi cr0,r15,0xc /* linear mapping ? */ - TLB_MISS_STATS_SAVE_INFO beq tlb_load_linear /* yes -> go to linear map load */ /* We do the user/kernel test for the PID here along with the RW test @@ -646,7 +631,6 @@ itlb_miss_fault_e6500: beq+ normal_tlb_miss /* We got a crappy address, just fault */ - TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT) TLB_MISS_EPILOG_ERROR b exc_instruction_storage_book3e @@ -745,7 +729,6 @@ normal_tlb_miss_done: * level 0 and just going back to userland. 
They are only needed * if you are going to take an access fault */ - TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK) TLB_MISS_EPILOG_SUCCESS rfi @@ -757,11 +740,9 @@ normal_tlb_miss_access_fault: ld r15,EX_TLB_ESR(r12) mtspr SPRN_DEAR,r14 mtspr SPRN_ESR,r15 - TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT) TLB_MISS_EPILOG_ERROR b exc_data_storage_book3e -1: TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT) - TLB_MISS_EPILOG_ERROR +1: TLB_MISS_EPILOG_ERROR b exc_instruction_storage_book3e @@ -899,7 +880,6 @@ virt_page_table_tlb_miss_done: 1: END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV) /* Return to caller, normal case */ - TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK); TLB_MISS_EPILOG_SUCCESS rfi @@ -935,18 +915,15 @@ virt_page_table_tlb_miss_fault: beq 1f mtspr SPRN_DEAR,r15 mtspr SPRN_ESR,r16 - TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT); TLB_MISS_EPILOG_ERROR b exc_data_storage_book3e -1: TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT); - TLB_MISS_EPILOG_ERROR +1: TLB_MISS_EPILOG_ERROR b exc_instruction_storage_book3e virt_page_table_tlb_miss_whacko_fault: /* The linear fault will restart everything so ESR and DEAR will * not have been clobbered, let's just fault with what we have */ - TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_FAULT); TLB_MISS_EPILOG_ERROR b exc_data_storage_book3e @@ -971,7 +948,6 @@ virt_page_table_tlb_miss_whacko_fault: mfspr r16,SPRN_DEAR /* get faulting address */ srdi r11,r16,60 /* get region */ cmpldi cr0,r11,0xc /* linear mapping ? */ - TLB_MISS_STATS_SAVE_INFO beq tlb_load_linear /* yes -> go to linear map load */ /* We do the user/kernel test for the PID here along with the RW test @@ -991,7 +967,6 @@ virt_page_table_tlb_miss_whacko_fault: /* We got a crappy address, just fault with whatever DEAR and ESR * are here */ - TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT) TLB_MISS_EPILOG_ERROR b exc_data_storage_book3e @@ -1015,7 +990,6 @@ virt_page_table_tlb_miss_whacko_fault: */ srdi r11,r16,60 /* get region */ cmpldi cr0,r11,0xc /* linear mapping ? */ - TLB_MISS_STATS_SAVE_INFO beq tlb_load_linear /* yes -> go to linear map load */ /* We do the user/kernel test for the PID here along with the RW test @@ -1033,7 +1007,6 @@ virt_page_table_tlb_miss_whacko_fault: beq+ htw_tlb_miss /* We got a crappy address, just fault */ - TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT) TLB_MISS_EPILOG_ERROR b exc_instruction_storage_book3e @@ -1130,7 +1103,6 @@ htw_tlb_miss_done: * level 0 and just going back to userland. They are only needed * if you are going to take an access fault */ - TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK) TLB_MISS_EPILOG_SUCCESS rfi @@ -1142,11 +1114,9 @@ htw_tlb_miss_fault: beq 1f mtspr SPRN_DEAR,r16 mtspr SPRN_ESR,r14 - TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT) TLB_MISS_EPILOG_ERROR b exc_data_storage_book3e -1: TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT) - TLB_MISS_EPILOG_ERROR +1: TLB_MISS_EPILOG_ERROR b exc_instruction_storage_book3e /* @@ -1221,7 +1191,6 @@ tlb_load_linear_done: * We do that because we can't resume a fault within a TLB * miss handler, due to MAS and TLB reservation being clobbered. */ - TLB_MISS_STATS_X(MMSTAT_TLB_MISS_LINEAR) TLB_MISS_EPILOG_ERROR rfi @@ -1233,13 +1202,3 @@ tlb_load_linear_fault: b exc_data_storage_book3e 1: TLB_MISS_EPILOG_ERROR_SPECIAL b exc_instruction_storage_book3e - - -#ifdef CONFIG_BOOK3E_MMU_TLB_STATS -.tlb_stat_inc: -1: ldarx r8,0,r9 - addi r8,r8,1 - stdcx. 
r8,0,r9 - bne- 1b - blr -#endif -- cgit v1.2.3 From df4d4ef22446b3a789a4efd74d34f2ec1e24deb2 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 24 Jul 2020 23:17:24 +1000 Subject: powerpc/32s: Fix CONFIG_BOOK3S_601 uses We have two uses of CONFIG_BOOK3S_601, which doesn't exist. Fix them to use CONFIG_PPC_BOOK3S_601 which is the correct symbol. Fixes: 12c3f1fd87bf ("powerpc/32s: get rid of CPU_FTR_601 feature") Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724131728.1643966-5-mpe@ellerman.id.au --- arch/powerpc/include/asm/ptrace.h | 2 +- arch/powerpc/include/asm/timex.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index f194339cef3b..155a197c0aa1 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -243,7 +243,7 @@ static inline void set_trap_norestart(struct pt_regs *regs) } #define arch_has_single_step() (1) -#ifndef CONFIG_BOOK3S_601 +#ifndef CONFIG_PPC_BOOK3S_601 #define arch_has_block_step() (true) #else #define arch_has_block_step() (false) diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h index d2d2c4bd8435..6047402b0a4d 100644 --- a/arch/powerpc/include/asm/timex.h +++ b/arch/powerpc/include/asm/timex.h @@ -17,7 +17,7 @@ typedef unsigned long cycles_t; static inline cycles_t get_cycles(void) { - if (IS_ENABLED(CONFIG_BOOK3S_601)) + if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) return 0; return mftb(); -- cgit v1.2.3 From 69eeff022433b54390a359c629f6457d7d1a8e94 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 24 Jul 2020 23:17:25 +1000 Subject: powerpc/32s: Remove TAUException wart in traps.c All 32 and 64-bit builds that don't have CONFIG_TAU_INT enabled (all of them), get a definition of TAUException() in traps.c. On 64-bit it's completely useless, and just wastes ~120 bytes of text. On 32-bit it allows the kernel to link because head_32.S calls it unconditionally. Instead follow the example of altivec_assist_exception(), and if CONFIG_TAU_INT is not enabled just point it at unknown_exception using the preprocessor. 
Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724131728.1643966-6-mpe@ellerman.id.au --- arch/powerpc/kernel/head_32.S | 4 ++++ arch/powerpc/kernel/traps.c | 8 -------- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index bbef6ce8322b..f3ab94d73936 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -671,6 +671,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU) #ifndef CONFIG_ALTIVEC #define altivec_assist_exception unknown_exception +#endif + +#ifndef CONFIG_TAU_INT +#define TAUException unknown_exception #endif EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 97413a385720..d1ebe152f210 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -2060,14 +2060,6 @@ void DebugException(struct pt_regs *regs, unsigned long debug_status) NOKPROBE_SYMBOL(DebugException); #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ -#if !defined(CONFIG_TAU_INT) -void TAUException(struct pt_regs *regs) -{ - printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n", - regs->nip, regs->msr, regs->trap, print_tainted()); -} -#endif /* CONFIG_INT_TAU */ - #ifdef CONFIG_ALTIVEC void altivec_assist_exception(struct pt_regs *regs) { -- cgit v1.2.3 From e5eff89657e72a9050d95fde146b54c7dc165981 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 24 Jul 2020 23:17:26 +1000 Subject: powerpc/boot: Fix CONFIG_PPC_MPC52XX references Commit 866bfc75f40e ("powerpc: conditionally compile platform-specific serial drivers") made some code depend on CONFIG_PPC_MPC52XX, which doesn't exist. Fix it to use CONFIG_PPC_MPC52xx. Fixes: 866bfc75f40e ("powerpc: conditionally compile platform-specific serial drivers") Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724131728.1643966-7-mpe@ellerman.id.au --- arch/powerpc/boot/Makefile | 2 +- arch/powerpc/boot/serial.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 4d43cb59b4a4..44af71543380 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -117,7 +117,7 @@ src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \ elf_util.c $(zlib-y) devtree.c stdlib.c \ oflib.c ofconsole.c cuboot.c -src-wlib-$(CONFIG_PPC_MPC52XX) += mpc52xx-psc.c +src-wlib-$(CONFIG_PPC_MPC52xx) += mpc52xx-psc.c src-wlib-$(CONFIG_PPC64_BOOT_WRAPPER) += opal-calls.S opal.c ifndef CONFIG_PPC64_BOOT_WRAPPER src-wlib-y += crtsavres.S diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c index 0bfa7e87e546..9a19e5905485 100644 --- a/arch/powerpc/boot/serial.c +++ b/arch/powerpc/boot/serial.c @@ -128,7 +128,7 @@ int serial_console_init(void) dt_is_compatible(devp, "fsl,cpm2-smc-uart")) rc = cpm_console_init(devp, &serial_cd); #endif -#ifdef CONFIG_PPC_MPC52XX +#ifdef CONFIG_PPC_MPC52xx else if (dt_is_compatible(devp, "fsl,mpc5200-psc-uart")) rc = mpc5200_psc_console_init(devp, &serial_cd); #endif -- cgit v1.2.3 From 157dad8678ad910ef7579c3f8ba93cc2940b014b Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 24 Jul 2020 23:17:27 +1000 Subject: powerpc/kvm: Use correct CONFIG symbol in comment This comment refers to the non-existent CONFIG_PPC_BOOK3S_XX, which confuses scripts/checkkconfigsymbols.py. Change it to use the correct symbol. 
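A wrong Kconfig name is not a C error, which is why these slip through review: IS_ENABLED() of a symbol no Kconfig file defines simply evaluates to 0, and names that only appear in comments or Makefile conditionals are never seen by the compiler at all, so only scripts/checkkconfigsymbols.py flags them. A hedged sketch reusing the symbol from the CONFIG_BOOK3S_601 fix above (everything else here is invented):

#include <linux/kconfig.h>

/*
 * Sketch of the failure mode (the function body is invented): the guard is
 * meant to fire on a 601 core, but the misspelled name (missing "PPC_") is
 * unknown to Kconfig, so IS_ENABLED() quietly evaluates to 0, the early
 * return is dead code, and no compiler or linker diagnostic results.
 */
static inline unsigned long sketch_get_cycles(void)
{
	if (IS_ENABLED(CONFIG_BOOK3S_601))	/* always false: wrong symbol */
		return 0;

	return 1;	/* placeholder for the real timebase read */
}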
Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724131728.1643966-8-mpe@ellerman.id.au --- arch/powerpc/kvm/book3s_interrupts.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index f7ad99d972ce..607a9b99c334 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S @@ -26,7 +26,7 @@ #define FUNC(name) name #define GET_SHADOW_VCPU(reg) lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2) -#endif /* CONFIG_PPC_BOOK3S_XX */ +#endif /* CONFIG_PPC_BOOK3S_64 */ #define VCPU_LOAD_NVGPRS(vcpu) \ PPC_LL r14, VCPU_GPR(R14)(vcpu); \ -- cgit v1.2.3 From ee36d867b2fefeb6fb6661b27e62e29c9ca5e7e5 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 24 Jul 2020 23:17:28 +1000 Subject: powerpc: Drop old comment about CONFIG_POWER There's a comment in time.h referring to CONFIG_POWER, which doesn't exist. That confuses scripts/checkkconfigsymbols.py. Presumably the comment was referring to a CONFIG_POWER vs CONFIG_PPC, in which case for CONFIG_POWER we would #define __USE_RTC to 1. But instead we have CONFIG_PPC_BOOK3S_601, and these days we have IS_ENABLED(). So the comment is no longer relevant, drop it. Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724131728.1643966-9-mpe@ellerman.id.au --- arch/powerpc/include/asm/time.h | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index b287cfc2dd85..cb326720a8a1 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -39,7 +39,6 @@ struct div_result { }; /* Accessor functions for the timebase (RTC on 601) registers. */ -/* If one day CONFIG_POWER is added just define __USE_RTC as 1 */ #define __USE_RTC() (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) #ifdef CONFIG_PPC64 -- cgit v1.2.3 From 81a413259a224f0d1783c41a74f18864d4f3d67e Mon Sep 17 00:00:00 2001 From: Balamuruhan S Date: Mon, 30 Mar 2020 13:29:54 +0530 Subject: powerpc/xmon: Use `dcbf` inplace of `dcbi` instruction for 64bit Book3S Data Cache Block Invalidate (dcbi) instruction implemented back in PowerPC architecture version 2.03. But as per Power Processor Users Manual it is obsolete and not supported by POWER8/POWER9 core. Attempt to use of this illegal instruction results in a hypervisor emulation assistance interrupt. So, ifdef it out the option `i` in xmon for 64bit Book3S. 
0:mon> fi cpu 0x0: Vector: 700 (Program Check) at [c000000003be74a0] pc: c000000000102030: cacheflush+0x180/0x1a0 lr: c000000000101f3c: cacheflush+0x8c/0x1a0 sp: c000000003be7730 msr: 8000000000081033 current = 0xc0000000035e5c00 paca = 0xc000000001910000 irqmask: 0x03 irq_happened: 0x01 pid = 1025, comm = bash Linux version 5.6.0-rc5-g5aa19adac (root@ltc-wspoon6) (gcc version 7.4.0 (Ubuntu 7.4.0-1ubuntu1~18.04.1)) #1 SMP Tue Mar 10 04:38:41 CDT 2020 cpu 0x0: Exception 700 (Program Check) in xmon, returning to main loop [c000000003be7c50] c00000000084abb0 __handle_sysrq+0xf0/0x2a0 [c000000003be7d00] c00000000084b3c0 write_sysrq_trigger+0xb0/0xe0 [c000000003be7d30] c0000000004d1edc proc_reg_write+0x8c/0x130 [c000000003be7d60] c00000000040dc7c __vfs_write+0x3c/0x70 [c000000003be7d80] c000000000410e70 vfs_write+0xd0/0x210 [c000000003be7dd0] c00000000041126c ksys_write+0xdc/0x130 [c000000003be7e20] c00000000000b9d0 system_call+0x5c/0x68 --- Exception: c01 (System Call) at 00007fffa345e420 SP (7ffff0b08ab0) is in userspace Signed-off-by: Balamuruhan S Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200330075954.538773-1-bala24@linux.ibm.com --- arch/powerpc/xmon/xmon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 8fb1f857c11c..4923347b884b 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -1869,7 +1869,7 @@ static void cacheflush(void) catch_memory_errors = 1; sync(); - if (cmd != 'i') { + if (cmd != 'i' || IS_ENABLED(CONFIG_PPC_BOOK3S_64)) { for (; nflush > 0; --nflush, adrs += L1_CACHE_BYTES) cflush((void *) adrs); } else { -- cgit v1.2.3 From ef26b76d1af61b90eb0dd3da58ad4f97d8e028f8 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 13 Jul 2020 20:37:48 +0530 Subject: powerpc/hugetlb/cma: Allocate gigantic hugetlb pages using CMA commit: cf11e85fc08c ("mm: hugetlb: optionally allocate gigantic hugepages using cma") added support for allocating gigantic hugepages using CMA. This patch enables the same for powerpc Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200713150749.25245-1-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/hugetlb.h | 7 +++++++ arch/powerpc/kernel/setup-common.c | 3 +++ arch/powerpc/mm/hugetlbpage.c | 18 ++++++++++++++++++ 3 files changed, 28 insertions(+) diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 551a9d4d3958..013165e62618 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -57,6 +57,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty); +void gigantic_hugetlb_cma_reserve(void) __init; #include #else /* ! CONFIG_HUGETLB_PAGE */ @@ -71,6 +72,12 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, { return NULL; } + + +static inline void __init gigantic_hugetlb_cma_reserve(void) +{ +} + #endif /* CONFIG_HUGETLB_PAGE */ #endif /* _ASM_POWERPC_HUGETLB_H */ diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 9d3faac53295..b198b0ff25bc 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -928,6 +928,9 @@ void __init setup_arch(char **cmdline_p) /* Reserve large chunks of memory for use by CMA for KVM. 
*/ kvm_cma_reserve(); + /* Reserve large chunks of memory for us by CMA for hugetlb */ + gigantic_hugetlb_cma_reserve(); + klp_init_thread_info(&init_task); init_mm.start_code = (unsigned long)_stext; diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index e9bfbccd975d..26292544630f 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -684,3 +684,21 @@ void flush_dcache_icache_hugepage(struct page *page) } } } + +void __init gigantic_hugetlb_cma_reserve(void) +{ + unsigned long order = 0; + + if (radix_enabled()) + order = PUD_SHIFT - PAGE_SHIFT; + else if (!firmware_has_feature(FW_FEATURE_LPAR) && mmu_psize_defs[MMU_PAGE_16G].shift) + /* + * For pseries we do use ibm,expected#pages for reserving 16G pages. + */ + order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT; + + if (order) { + VM_WARN_ON(order < MAX_ORDER); + hugetlb_cma_reserve(order); + } +} -- cgit v1.2.3 From a5a8b258da7861009240b57687dfef47af91b406 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 13 Jul 2020 20:37:49 +0530 Subject: powerpc/kvm/cma: Improve kernel log during boot Current kernel gives: [ 0.000000] cma: Reserved 26224 MiB at 0x0000007959000000 [ 0.000000] hugetlb_cma: reserve 65536 MiB, up to 16384 MiB per node [ 0.000000] cma: Reserved 16384 MiB at 0x0000001800000000 With the fix [ 0.000000] kvm_cma_reserve: reserving 26214 MiB for global area [ 0.000000] cma: Reserved 26224 MiB at 0x0000007959000000 [ 0.000000] hugetlb_cma: reserve 65536 MiB, up to 16384 MiB per node [ 0.000000] cma: Reserved 16384 MiB at 0x0000001800000000 Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200713150749.25245-2-aneesh.kumar@linux.ibm.com --- arch/powerpc/kvm/book3s_hv_builtin.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 7cd3cf3d366b..073617ce83e0 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -113,7 +113,7 @@ void __init kvm_cma_reserve(void) selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT; if (selected_size) { - pr_debug("%s: reserving %ld MiB for global area\n", __func__, + pr_info("%s: reserving %ld MiB for global area\n", __func__, (unsigned long)selected_size / SZ_1M); align_size = HPT_ALIGN_PAGES << PAGE_SHIFT; cma_declare_contiguous(0, selected_size, 0, align_size, -- cgit v1.2.3 From bf6b7661f41615c0815fce0a3f27acb5fc005470 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 27 Jul 2020 14:29:08 +0530 Subject: powerpc/book3s64/radix: Add kernel command line option to disable radix GTSE This adds a kernel command line option that can be used to disable GTSE support. Disabling GTSE implies kernel will make hcalls to invalidate TLB entries. This was done so that we can do VM migration between configs that enable/disable GTSE support via hypervisor. To migrate a VM from a system that supports GTSE to a system that doesn't, we can boot the guest with radix_hcall_invalidate=on, thereby forcing the guest to use hcalls for TLB invalidates. The check for hcall availability is done in pSeries_setup_arch so that the panic message appears on the console. This should only happen on a hypervisor that doesn't force the guest to hash translation even though it can't handle the radix GTSE=0 request via CAS. 
With radix_hcall_invalidate=on if the hypervisor doesn't support hcall_rpt_invalidate hcall it should force the LPAR to hash translation. Signed-off-by: Aneesh Kumar K.V Tested-by: Bharata B Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200727085908.420806-1-aneesh.kumar@linux.ibm.com --- Documentation/admin-guide/kernel-parameters.txt | 4 ++++ arch/powerpc/include/asm/firmware.h | 4 +++- arch/powerpc/kernel/prom_init.c | 13 +++++++++---- arch/powerpc/mm/init_64.c | 1 - arch/powerpc/platforms/pseries/firmware.c | 1 + arch/powerpc/platforms/pseries/setup.c | 5 +++++ 6 files changed, 22 insertions(+), 6 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index fb95fad81c79..3ab61cd0f89c 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -896,6 +896,10 @@ disable_radix [PPC] Disable RADIX MMU mode on POWER9 + radix_hcall_invalidate=on [PPC/PSERIES] + Disable RADIX GTSE feature and use hcall for TLB + invalidate. + disable_tlbie [PPC] Disable TLBIE instruction. Currently does not work with KVM, with HASH MMU, or with coherent accelerators. diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h index f67efbaba17f..0b295bdb201e 100644 --- a/arch/powerpc/include/asm/firmware.h +++ b/arch/powerpc/include/asm/firmware.h @@ -52,6 +52,7 @@ #define FW_FEATURE_PAPR_SCM ASM_CONST(0x0000002000000000) #define FW_FEATURE_ULTRAVISOR ASM_CONST(0x0000004000000000) #define FW_FEATURE_STUFF_TCE ASM_CONST(0x0000008000000000) +#define FW_FEATURE_RPT_INVALIDATE ASM_CONST(0x0000010000000000) #ifndef __ASSEMBLY__ @@ -71,7 +72,8 @@ enum { FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN | FW_FEATURE_HPT_RESIZE | FW_FEATURE_DRMEM_V2 | FW_FEATURE_DRC_INFO | FW_FEATURE_BLOCK_REMOVE | - FW_FEATURE_PAPR_SCM | FW_FEATURE_ULTRAVISOR, + FW_FEATURE_PAPR_SCM | FW_FEATURE_ULTRAVISOR | + FW_FEATURE_RPT_INVALIDATE, FW_FEATURE_PSERIES_ALWAYS = 0, FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_ULTRAVISOR, FW_FEATURE_POWERNV_ALWAYS = 0, diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index cbc605cfdec0..f279a1f58fa7 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -169,6 +169,7 @@ static unsigned long __prombss prom_tce_alloc_end; #ifdef CONFIG_PPC_PSERIES static bool __prombss prom_radix_disable; +static bool __prombss prom_radix_gtse_disable; static bool __prombss prom_xive_disable; #endif @@ -823,6 +824,12 @@ static void __init early_cmdline_parse(void) if (prom_radix_disable) prom_debug("Radix disabled from cmdline\n"); + opt = prom_strstr(prom_cmd_line, "radix_hcall_invalidate=on"); + if (opt) { + prom_radix_gtse_disable = true; + prom_debug("Radix GTSE disabled from cmdline\n"); + } + opt = prom_strstr(prom_cmd_line, "xive=off"); if (opt) { prom_xive_disable = true; @@ -1285,10 +1292,8 @@ static void __init prom_parse_platform_support(u8 index, u8 val, prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support); break; case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */ - if (val & OV5_FEAT(OV5_RADIX_GTSE)) { - prom_debug("Radix - GTSE supported\n"); - support->radix_gtse = true; - } + if (val & OV5_FEAT(OV5_RADIX_GTSE)) + support->radix_gtse = !prom_radix_gtse_disable; break; case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */ prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT), diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 
152aa0200cef..4ae5fc0ceb30 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -406,7 +406,6 @@ static void __init early_check_vec5(void) } if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] & OV5_FEAT(OV5_RADIX_GTSE))) { - pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n"); cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE; } else cur_cpu_spec->mmu_features |= MMU_FTR_GTSE; diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c index 3e49cc23a97a..4c7b7f5a2ebc 100644 --- a/arch/powerpc/platforms/pseries/firmware.c +++ b/arch/powerpc/platforms/pseries/firmware.c @@ -65,6 +65,7 @@ hypertas_fw_features_table[] = { {FW_FEATURE_HPT_RESIZE, "hcall-hpt-resize"}, {FW_FEATURE_BLOCK_REMOVE, "hcall-block-remove"}, {FW_FEATURE_PAPR_SCM, "hcall-scm"}, + {FW_FEATURE_RPT_INVALIDATE, "hcall-rpt-invalidate"}, }; /* Build up the firmware features bitmask using the contents of diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index e29c9bf0a3b9..b1a8c72a8c12 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -747,6 +747,11 @@ static void __init pSeries_setup_arch(void) smp_init_pseries(); + if (radix_enabled() && !mmu_has_feature(MMU_FTR_GTSE)) + if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE)) + panic("BUG: Radix support requires either GTSE or RPT_INVALIDATE\n"); + + /* openpic global configuration register (64-bit format). */ /* openpic Interrupt Source Unit pointer (64-bit format). */ /* python0 facility area (mmio) (64-bit format) REAL address. */ -- cgit v1.2.3 From 5e66a0cb5fbdc76f9ad86a1e8f43256dbad29ef7 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 27 Jul 2020 17:42:01 -0500 Subject: powerpc: Use fallthrough pseudo-keyword Replace the existing /* fall through */ comments and its variants with the new pseudo-keyword macro fallthrough[1]. Also, remove unnecessary fall-through markings when it is the case. [1] https://www.kernel.org/doc/html/v5.7/process/deprecated.html?highlight=fallthrough#implicit-switch-case-fall-through Signed-off-by: Gustavo A. R. 
Silva Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200727224201.GA10133@embeddedor --- arch/powerpc/kernel/align.c | 8 ++++---- arch/powerpc/platforms/powermac/feature.c | 2 +- arch/powerpc/platforms/powernv/opal-async.c | 2 +- arch/powerpc/platforms/pseries/hvcserver.c | 2 +- arch/powerpc/xmon/xmon.c | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 1f1ce8b86d5b..c7797eb958c7 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -178,11 +178,11 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, ret |= __get_user_inatomic(temp.v[1], p++); ret |= __get_user_inatomic(temp.v[2], p++); ret |= __get_user_inatomic(temp.v[3], p++); - /* fall through */ + fallthrough; case 4: ret |= __get_user_inatomic(temp.v[4], p++); ret |= __get_user_inatomic(temp.v[5], p++); - /* fall through */ + fallthrough; case 2: ret |= __get_user_inatomic(temp.v[6], p++); ret |= __get_user_inatomic(temp.v[7], p++); @@ -263,11 +263,11 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, ret |= __put_user_inatomic(data.v[1], p++); ret |= __put_user_inatomic(data.v[2], p++); ret |= __put_user_inatomic(data.v[3], p++); - /* fall through */ + fallthrough; case 4: ret |= __put_user_inatomic(data.v[4], p++); ret |= __put_user_inatomic(data.v[5], p++); - /* fall through */ + fallthrough; case 2: ret |= __put_user_inatomic(data.v[6], p++); ret |= __put_user_inatomic(data.v[7], p++); diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c index 181caa3f6717..5c77b9a24c0e 100644 --- a/arch/powerpc/platforms/powermac/feature.c +++ b/arch/powerpc/platforms/powermac/feature.c @@ -1465,7 +1465,7 @@ static long g5_i2s_enable(struct device_node *node, long param, long value) case 2: if (macio->type == macio_shasta) break; - /* fall through */ + fallthrough; default: return -ENODEV; } diff --git a/arch/powerpc/platforms/powernv/opal-async.c b/arch/powerpc/platforms/powernv/opal-async.c index 1656e8965d6b..c094fdf5825c 100644 --- a/arch/powerpc/platforms/powernv/opal-async.c +++ b/arch/powerpc/platforms/powernv/opal-async.c @@ -104,7 +104,7 @@ static int __opal_async_release_token(int token) */ case ASYNC_TOKEN_DISPATCHED: opal_async_tokens[token].state = ASYNC_TOKEN_ABANDONED; - /* Fall through */ + fallthrough; default: rc = 1; } diff --git a/arch/powerpc/platforms/pseries/hvcserver.c b/arch/powerpc/platforms/pseries/hvcserver.c index 267139b13530..96e18d3b2fcf 100644 --- a/arch/powerpc/platforms/pseries/hvcserver.c +++ b/arch/powerpc/platforms/pseries/hvcserver.c @@ -45,7 +45,7 @@ static int hvcs_convert(long to_convert) case H_LONG_BUSY_ORDER_10_SEC: case H_LONG_BUSY_ORDER_100_SEC: return -EBUSY; - case H_FUNCTION: /* fall through */ + case H_FUNCTION: default: return -EPERM; } diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 4923347b884b..df7bca00f5ec 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -4278,7 +4278,7 @@ static int do_spu_cmd(void) subcmd = inchar(); if (isxdigit(subcmd) || subcmd == '\n') termch = subcmd; - /* fall through */ + fallthrough; case 'f': scanhex(&num); if (num >= XMON_NUM_SPUS || !spu_info[num].spu) { -- cgit v1.2.3 From f2af201002a8bc22500c04cc474ea480bf361351 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 3 Mar 2020 11:27:48 +1000 Subject: powerpc/build: vdso linker warning for orphan sections Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman 
Link: https://lore.kernel.org/r/20200303012748.4190929-1-npiggin@gmail.com --- arch/powerpc/kernel/vdso32/Makefile | 2 +- arch/powerpc/kernel/vdso32/vdso32.lds.S | 1 + arch/powerpc/kernel/vdso64/Makefile | 2 +- arch/powerpc/kernel/vdso64/vdso64.lds.S | 3 ++- 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile index e147bbdc12cd..87ab1152d5ce 100644 --- a/arch/powerpc/kernel/vdso32/Makefile +++ b/arch/powerpc/kernel/vdso32/Makefile @@ -50,7 +50,7 @@ $(obj-vdso32): %.o: %.S FORCE # actual build commands quiet_cmd_vdso32ld = VDSO32L $@ - cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) + cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn) -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) quiet_cmd_vdso32as = VDSO32A $@ cmd_vdso32as = $(VDSOCC) $(a_flags) $(CC32FLAGS) -c -o $@ $< diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S index 5206c2eb2a1d..4c985467a668 100644 --- a/arch/powerpc/kernel/vdso32/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S @@ -111,6 +111,7 @@ SECTIONS *(.note.GNU-stack) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) + *(.glink .iplt .plt .rela*) } } diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile index 32ebb3522ea1..38c317f25141 100644 --- a/arch/powerpc/kernel/vdso64/Makefile +++ b/arch/powerpc/kernel/vdso64/Makefile @@ -34,7 +34,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE # actual build commands quiet_cmd_vdso64ld = VDSO64L $@ - cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) + cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn) # install commands for the unstripped file quiet_cmd_vdso_install = INSTALL $@ diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S index 256fb9720298..4e3a8d4ee614 100644 --- a/arch/powerpc/kernel/vdso64/vdso64.lds.S +++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S @@ -30,7 +30,7 @@ SECTIONS . = ALIGN(16); .text : { *(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*) - *(.sfpr .glink) + *(.sfpr) } :text PROVIDE(__etext = .); PROVIDE(_etext = .); @@ -111,6 +111,7 @@ SECTIONS *(.branch_lt) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) + *(.glink .iplt .plt .rela*) } } -- cgit v1.2.3 From b6ac59d39a348af29477d7bfdc3ba23526e3f4ea Mon Sep 17 00:00:00 2001 From: Qinglang Miao Date: Tue, 28 Jul 2020 10:28:07 +0800 Subject: powerpc: use for_each_child_of_node() macro Use for_each_child_of_node() macro instead of open coding it. 
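For readers less familiar with the OF helpers: for_each_child_of_node() is essentially the canonical spelling of the open-coded of_get_next_child() walks replaced below, so each iteration still drops the reference on the previous child and takes one on the next; the only subtlety is an early exit, where the caller keeps (or must of_node_put()) the reference on the current child. A hedged sketch, with the parent node and compatible string invented for illustration:

#include <linux/of.h>

/*
 * Return the first child compatible with "vendor,example", with a
 * reference held, or NULL.  Illustrative only; not taken from the patch.
 */
static struct device_node *find_example_child(struct device_node *parent)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (of_device_is_compatible(child, "vendor,example"))
			return child;	/* keep the reference the iterator took */
	}

	return NULL;	/* loop ran to completion; no reference outstanding */
}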
Signed-off-by: Qinglang Miao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200728022807.87815-1-miaoqinglang@huawei.com --- arch/powerpc/platforms/pasemi/misc.c | 3 +-- arch/powerpc/platforms/powermac/low_i2c.c | 6 ++---- arch/powerpc/platforms/powermac/pfunc_base.c | 4 ++-- arch/powerpc/platforms/powermac/udbg_scc.c | 2 +- 4 files changed, 6 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/platforms/pasemi/misc.c b/arch/powerpc/platforms/pasemi/misc.c index 1cd4ca14b08d..1bf65d02d3ba 100644 --- a/arch/powerpc/platforms/pasemi/misc.c +++ b/arch/powerpc/platforms/pasemi/misc.c @@ -56,8 +56,7 @@ static int __init pasemi_register_i2c_devices(void) if (!adap_node) continue; - node = NULL; - while ((node = of_get_next_child(adap_node, node))) { + for_each_child_of_node(adap_node, node) { struct i2c_board_info info = {}; const u32 *addr; int len; diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index bf4be4b53b44..f77a59b5c2e1 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -629,8 +629,7 @@ static void __init kw_i2c_probe(void) for (i = 0; i < chans; i++) kw_i2c_add(host, np, np, i); } else { - for (child = NULL; - (child = of_get_next_child(np, child)) != NULL;) { + for_each_child_of_node(np, child) { const u32 *reg = of_get_property(child, "reg", NULL); if (reg == NULL) @@ -1193,8 +1192,7 @@ static void pmac_i2c_devscan(void (*callback)(struct device_node *dev, * platform function instance */ list_for_each_entry(bus, &pmac_i2c_busses, link) { - for (np = NULL; - (np = of_get_next_child(bus->busnode, np)) != NULL;) { + for_each_child_of_node(bus->busnode, np) { struct whitelist_ent *p; /* If multibus, check if device is on that bus */ if (bus->flags & pmac_i2c_multibus) diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c index 62311e84a423..f5422506d4b0 100644 --- a/arch/powerpc/platforms/powermac/pfunc_base.c +++ b/arch/powerpc/platforms/powermac/pfunc_base.c @@ -114,7 +114,7 @@ static void macio_gpio_init_one(struct macio_chip *macio) * Ok, got one, we dont need anything special to track them down, so * we just create them all */ - for (gp = NULL; (gp = of_get_next_child(gparent, gp)) != NULL;) { + for_each_child_of_node(gparent, gp) { const u32 *reg = of_get_property(gp, "reg", NULL); unsigned long offset; if (reg == NULL) @@ -133,7 +133,7 @@ static void macio_gpio_init_one(struct macio_chip *macio) macio->of_node); /* And now we run all the init ones */ - for (gp = NULL; (gp = of_get_next_child(gparent, gp)) != NULL;) + for_each_child_of_node(gparent, gp) pmf_do_functions(gp, NULL, 0, PMF_FLAGS_ON_INIT, NULL); /* Note: We do not at this point implement the "at sleep" or "at wake" diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c index 6b61a18e8db3..f286bdfe8346 100644 --- a/arch/powerpc/platforms/powermac/udbg_scc.c +++ b/arch/powerpc/platforms/powermac/udbg_scc.c @@ -80,7 +80,7 @@ void udbg_scc_init(int force_scc) path = of_get_property(of_chosen, "linux,stdout-path", NULL); if (path != NULL) stdout = of_find_node_by_path(path); - for (ch = NULL; (ch = of_get_next_child(escc, ch)) != NULL;) { + for_each_child_of_node(escc, ch) { if (ch == stdout) ch_def = of_node_get(ch); if (of_node_name_eq(ch, "ch-a")) -- cgit v1.2.3 From cf1ae052e073c7ef6cf1a783a6427f7228253bd3 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 28 Jul 2020 01:11:12 +0800 Subject: 
powerpc/powernv/sriov: Remove unused but set variable 'phb' Gcc report warning as follows: arch/powerpc/platforms/powernv/pci-sriov.c:602:25: warning: variable 'phb' set but not used [-Wunused-but-set-variable] 602 | struct pnv_phb *phb; | ^~~ This variable is not used, so this commit removing it. Reported-by: Hulk Robot Signed-off-by: Wei Yongjun Acked-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200727171112.2781-1-weiyongjun1@huawei.com --- arch/powerpc/platforms/powernv/pci-sriov.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c index 8404d8c3901d..7894745fd4f8 100644 --- a/arch/powerpc/platforms/powernv/pci-sriov.c +++ b/arch/powerpc/platforms/powernv/pci-sriov.c @@ -599,10 +599,8 @@ static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset) static void pnv_pci_sriov_disable(struct pci_dev *pdev) { u16 num_vfs, base_pe; - struct pnv_phb *phb; struct pnv_iov_data *iov; - phb = pci_bus_to_pnvhb(pdev->bus); iov = pnv_iov_get(pdev); num_vfs = iov->num_vfs; base_pe = iov->vf_pe_arr[0].pe_number; -- cgit v1.2.3 From 854eb5022be04f81e318765f089f41a57c8e5d83 Mon Sep 17 00:00:00 2001 From: Harish Date: Tue, 9 Jun 2020 13:44:23 +0530 Subject: selftests/powerpc: Fix CPU affinity for child process On systems with large number of cpus, test fails trying to set affinity by calling sched_setaffinity() with smaller size for affinity mask. This patch fixes it by making sure that the size of allocated affinity mask is dependent on the number of CPUs as reported by get_nprocs(). Fixes: 00b7ec5c9cf3 ("selftests/powerpc: Import Anton's context_switch2 benchmark") Reported-by: Shirisha Ganta Signed-off-by: Sandipan Das Signed-off-by: Harish Reviewed-by: Kamalesh Babulal Reviewed-by: Satheesh Rajendran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200609081423.529664-1-harish@linux.ibm.com --- .../selftests/powerpc/benchmarks/context_switch.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c index a2e8c9da7fa5..d50cc05df495 100644 --- a/tools/testing/selftests/powerpc/benchmarks/context_switch.c +++ b/tools/testing/selftests/powerpc/benchmarks/context_switch.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -104,8 +105,9 @@ static void start_thread_on(void *(*fn)(void *), void *arg, unsigned long cpu) static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu) { - int pid; - cpu_set_t cpuset; + int pid, ncpus; + cpu_set_t *cpuset; + size_t size; pid = fork(); if (pid == -1) { @@ -116,14 +118,23 @@ static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu) if (pid) return; - CPU_ZERO(&cpuset); - CPU_SET(cpu, &cpuset); + ncpus = get_nprocs(); + size = CPU_ALLOC_SIZE(ncpus); + cpuset = CPU_ALLOC(ncpus); + if (!cpuset) { + perror("malloc"); + exit(1); + } + CPU_ZERO_S(size, cpuset); + CPU_SET_S(cpu, size, cpuset); - if (sched_setaffinity(0, sizeof(cpuset), &cpuset)) { + if (sched_setaffinity(0, size, cpuset)) { perror("sched_setaffinity"); + CPU_FREE(cpuset); exit(1); } + CPU_FREE(cpuset); fn(arg); exit(0); -- cgit v1.2.3 From 8902c6f96364d1117236948d6c7b9178f428529c Mon Sep 17 00:00:00 2001 From: Balamuruhan S Date: Tue, 28 Jul 2020 18:33:06 +0530 Subject: powerpc/ppc-opcode: Add divde and divdeu opcodes Include 
instruction opcodes for divde and divdeu as macros. Signed-off-by: Balamuruhan S Reviewed-by: Sandipan Das Acked-by: Naveen N. Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200728130308.1790982-2-bala24@linux.ibm.com --- arch/powerpc/include/asm/ppc-opcode.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 4c0bdafb6a7b..a6e3700c4566 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -466,6 +466,10 @@ #define PPC_RAW_MULI(d, a, i) (0x1c000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i)) #define PPC_RAW_DIVWU(d, a, b) (0x7c000396 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_DIVDU(d, a, b) (0x7c000392 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_DIVDE(t, a, b) (0x7c000352 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_DIVDE_DOT(t, a, b) (0x7c000352 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1) +#define PPC_RAW_DIVDEU(t, a, b) (0x7c000312 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_DIVDEU_DOT(t, a, b) (0x7c000312 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1) #define PPC_RAW_AND(d, a, b) (0x7c000038 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) #define PPC_RAW_ANDI(d, a, i) (0x70000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) #define PPC_RAW_AND_DOT(d, a, b) (0x7c000039 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) @@ -510,6 +514,8 @@ #define PPC_DARN(t, l) stringify_in_c(.long PPC_RAW_DARN(t, l)) #define PPC_DCBAL(a, b) stringify_in_c(.long PPC_RAW_DCBAL(a, b)) #define PPC_DCBZL(a, b) stringify_in_c(.long PPC_RAW_DCBZL(a, b)) +#define PPC_DIVDE(t, a, b) stringify_in_c(.long PPC_RAW_DIVDE(t, a, b)) +#define PPC_DIVDEU(t, a, b) stringify_in_c(.long PPC_RAW_DIVDEU(t, a, b)) #define PPC_LQARX(t, a, b, eh) stringify_in_c(.long PPC_RAW_LQARX(t, a, b, eh)) #define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_RAW_LDARX(t, a, b, eh)) #define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_RAW_LWARX(t, a, b, eh)) -- cgit v1.2.3 From 151c32bf5ebdd41114267717dc4b53d2632cbd30 Mon Sep 17 00:00:00 2001 From: Balamuruhan S Date: Tue, 28 Jul 2020 18:33:07 +0530 Subject: powerpc/sstep: Add support for divde[.] and divdeu[.] instructions This patch adds emulation support for divde, divdeu instructions, - Divide Doubleword Extended (divde[.]) - Divide Doubleword Extended Unsigned (divdeu[.]) Signed-off-by: Balamuruhan S Reviewed-by: Sandipan Das Acked-by: Naveen N. Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200728130308.1790982-3-bala24@linux.ibm.com --- arch/powerpc/lib/sstep.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index c58ea9e787cb..caee8cc77e19 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -1806,7 +1806,18 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, op->val = (int) regs->gpr[ra] / (int) regs->gpr[rb]; goto arith_done; - +#ifdef __powerpc64__ + case 425: /* divde[.] */ + asm volatile(PPC_DIVDE(%0, %1, %2) : + "=r" (op->val) : "r" (regs->gpr[ra]), + "r" (regs->gpr[rb])); + goto arith_done; + case 393: /* divdeu[.] 
*/ + asm volatile(PPC_DIVDEU(%0, %1, %2) : + "=r" (op->val) : "r" (regs->gpr[ra]), + "r" (regs->gpr[rb])); + goto arith_done; +#endif case 755: /* darn */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) return -1; -- cgit v1.2.3 From b859c95cf4b936b5e8019e7ab68ee2740e609ffd Mon Sep 17 00:00:00 2001 From: Balamuruhan S Date: Tue, 28 Jul 2020 18:33:08 +0530 Subject: powerpc/test_emulate_step: Add testcases for divde[.] and divdeu[.] instructions Add testcases for divde, divde., divdeu, divdeu. emulated instructions to cover few scenarios, - with same dividend and divisor to have undefine RT for divdeu[.] - with divide by zero to have undefine RT for both divde[.] and divdeu[.] - with negative dividend to cover -|divisor| < r <= 0 if the dividend is negative for divde[.] - normal case with proper dividend and divisor for both divde[.] and divdeu[.] Signed-off-by: Balamuruhan S Reviewed-by: Sandipan Das Acked-by: Naveen N. Rao Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200728130308.1790982-4-bala24@linux.ibm.com --- arch/powerpc/lib/test_emulate_step.c | 156 +++++++++++++++++++++++++++++++++++ 1 file changed, 156 insertions(+) diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index d242e9f72e0c..0a201b771477 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -1019,6 +1019,162 @@ static struct compute_test compute_tests[] = { } } }, + { + .mnemonic = "divde", + .subtests = { + { + .descr = "RA = LONG_MIN, RB = LONG_MIN", + .instr = ppc_inst(PPC_RAW_DIVDE(20, 21, 22)), + .regs = { + .gpr[21] = LONG_MIN, + .gpr[22] = LONG_MIN, + } + }, + { + .descr = "RA = 1L, RB = 0", + .instr = ppc_inst(PPC_RAW_DIVDE(20, 21, 22)), + .flags = IGNORE_GPR(20), + .regs = { + .gpr[21] = 1L, + .gpr[22] = 0, + } + }, + { + .descr = "RA = LONG_MIN, RB = LONG_MAX", + .instr = ppc_inst(PPC_RAW_DIVDE(20, 21, 22)), + .regs = { + .gpr[21] = LONG_MIN, + .gpr[22] = LONG_MAX, + } + } + } + }, + { + .mnemonic = "divde.", + .subtests = { + { + .descr = "RA = LONG_MIN, RB = LONG_MIN", + .instr = ppc_inst(PPC_RAW_DIVDE_DOT(20, 21, 22)), + .regs = { + .gpr[21] = LONG_MIN, + .gpr[22] = LONG_MIN, + } + }, + { + .descr = "RA = 1L, RB = 0", + .instr = ppc_inst(PPC_RAW_DIVDE_DOT(20, 21, 22)), + .flags = IGNORE_GPR(20), + .regs = { + .gpr[21] = 1L, + .gpr[22] = 0, + } + }, + { + .descr = "RA = LONG_MIN, RB = LONG_MAX", + .instr = ppc_inst(PPC_RAW_DIVDE_DOT(20, 21, 22)), + .regs = { + .gpr[21] = LONG_MIN, + .gpr[22] = LONG_MAX, + } + } + } + }, + { + .mnemonic = "divdeu", + .subtests = { + { + .descr = "RA = LONG_MIN, RB = LONG_MIN", + .instr = ppc_inst(PPC_RAW_DIVDEU(20, 21, 22)), + .flags = IGNORE_GPR(20), + .regs = { + .gpr[21] = LONG_MIN, + .gpr[22] = LONG_MIN, + } + }, + { + .descr = "RA = 1L, RB = 0", + .instr = ppc_inst(PPC_RAW_DIVDEU(20, 21, 22)), + .flags = IGNORE_GPR(20), + .regs = { + .gpr[21] = 1L, + .gpr[22] = 0, + } + }, + { + .descr = "RA = LONG_MIN, RB = LONG_MAX", + .instr = ppc_inst(PPC_RAW_DIVDEU(20, 21, 22)), + .regs = { + .gpr[21] = LONG_MIN, + .gpr[22] = LONG_MAX, + } + }, + { + .descr = "RA = LONG_MAX - 1, RB = LONG_MAX", + .instr = ppc_inst(PPC_RAW_DIVDEU(20, 21, 22)), + .regs = { + .gpr[21] = LONG_MAX - 1, + .gpr[22] = LONG_MAX, + } + }, + { + .descr = "RA = LONG_MIN + 1, RB = LONG_MIN", + .instr = ppc_inst(PPC_RAW_DIVDEU(20, 21, 22)), + .flags = IGNORE_GPR(20), + .regs = { + .gpr[21] = LONG_MIN + 1, + .gpr[22] = LONG_MIN, + } + } + } + }, + { + .mnemonic = "divdeu.", + .subtests = { + { + .descr = "RA = LONG_MIN, RB 
= LONG_MIN", + .instr = ppc_inst(PPC_RAW_DIVDEU_DOT(20, 21, 22)), + .flags = IGNORE_GPR(20), + .regs = { + .gpr[21] = LONG_MIN, + .gpr[22] = LONG_MIN, + } + }, + { + .descr = "RA = 1L, RB = 0", + .instr = ppc_inst(PPC_RAW_DIVDEU_DOT(20, 21, 22)), + .flags = IGNORE_GPR(20), + .regs = { + .gpr[21] = 1L, + .gpr[22] = 0, + } + }, + { + .descr = "RA = LONG_MIN, RB = LONG_MAX", + .instr = ppc_inst(PPC_RAW_DIVDEU_DOT(20, 21, 22)), + .regs = { + .gpr[21] = LONG_MIN, + .gpr[22] = LONG_MAX, + } + }, + { + .descr = "RA = LONG_MAX - 1, RB = LONG_MAX", + .instr = ppc_inst(PPC_RAW_DIVDEU_DOT(20, 21, 22)), + .regs = { + .gpr[21] = LONG_MAX - 1, + .gpr[22] = LONG_MAX, + } + }, + { + .descr = "RA = LONG_MIN + 1, RB = LONG_MIN", + .instr = ppc_inst(PPC_RAW_DIVDEU_DOT(20, 21, 22)), + .flags = IGNORE_GPR(20), + .regs = { + .gpr[21] = LONG_MIN + 1, + .gpr[22] = LONG_MIN, + } + } + } + }, { .mnemonic = "paddi", .cpu_feature = CPU_FTR_ARCH_31, -- cgit v1.2.3 From f3054ffd71b5afd44832b2207e6e90267e1cd2d1 Mon Sep 17 00:00:00 2001 From: Thadeu Lima de Souza Cascardo Date: Tue, 28 Jul 2020 12:50:39 -0300 Subject: selftests/powerpc: Return skip code for spectre_v2 When running under older versions of qemu of under newer versions with old machine types, some security features will not be reported to the guest. This will lead the guest OS to consider itself Vulnerable to spectre_v2. So, spectre_v2 test fails in such cases when the host is mitigated and miss predictions cannot be detected as expected by the test. Make it return the skip code instead, for this particular case. We don't want to miss the case when the test fails and the system reports as mitigated or not affected. But it is not a problem to miss failures when the system reports as Vulnerable. Signed-off-by: Thadeu Lima de Souza Cascardo Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200728155039.401445-1-cascardo@canonical.com --- tools/testing/selftests/powerpc/security/spectre_v2.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tools/testing/selftests/powerpc/security/spectre_v2.c b/tools/testing/selftests/powerpc/security/spectre_v2.c index 8c6b982af2a8..c8d82b784102 100644 --- a/tools/testing/selftests/powerpc/security/spectre_v2.c +++ b/tools/testing/selftests/powerpc/security/spectre_v2.c @@ -183,6 +183,16 @@ int spectre_v2_test(void) if (miss_percent > 15) { printf("Branch misses > 15%% unexpected in this configuration!\n"); printf("Possible mis-match between reported & actual mitigation\n"); + /* + * Such a mismatch may be caused by a guest system + * reporting as vulnerable when the host is mitigated. + * Return skip code to avoid detecting this as an error. + * We are not vulnerable and reporting otherwise, so + * missing such a mismatch is safe. 
+ */ + if (state == VULNERABLE) + return 4; + return 1; } break; -- cgit v1.2.3 From c75d42e4c768c403f259f6c7f6217c850cf11be9 Mon Sep 17 00:00:00 2001 From: Alastair D'Silva Date: Wed, 15 Apr 2020 11:23:42 +1000 Subject: ocxl: Remove unnecessary externs Function declarations don't need externs, remove the existing ones so they are consistent with newer code Signed-off-by: Alastair D'Silva Acked-by: Andrew Donnellan Acked-by: Frederic Barrat Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200415012343.919255-2-alastair@d-silva.org --- arch/powerpc/include/asm/pnv-ocxl.h | 40 ++++++++++++++++++------------------- include/misc/ocxl.h | 6 +++--- 2 files changed, 22 insertions(+), 24 deletions(-) diff --git a/arch/powerpc/include/asm/pnv-ocxl.h b/arch/powerpc/include/asm/pnv-ocxl.h index 7de82647e761..ee79d2cd9fb6 100644 --- a/arch/powerpc/include/asm/pnv-ocxl.h +++ b/arch/powerpc/include/asm/pnv-ocxl.h @@ -9,28 +9,26 @@ #define PNV_OCXL_TL_BITS_PER_RATE 4 #define PNV_OCXL_TL_RATE_BUF_SIZE ((PNV_OCXL_TL_MAX_TEMPLATE+1) * PNV_OCXL_TL_BITS_PER_RATE / 8) -extern int pnv_ocxl_get_actag(struct pci_dev *dev, u16 *base, u16 *enabled, - u16 *supported); -extern int pnv_ocxl_get_pasid_count(struct pci_dev *dev, int *count); +int pnv_ocxl_get_actag(struct pci_dev *dev, u16 *base, u16 *enabled, u16 *supported); +int pnv_ocxl_get_pasid_count(struct pci_dev *dev, int *count); -extern int pnv_ocxl_get_tl_cap(struct pci_dev *dev, long *cap, +int pnv_ocxl_get_tl_cap(struct pci_dev *dev, long *cap, char *rate_buf, int rate_buf_size); -extern int pnv_ocxl_set_tl_conf(struct pci_dev *dev, long cap, - uint64_t rate_buf_phys, int rate_buf_size); - -extern int pnv_ocxl_get_xsl_irq(struct pci_dev *dev, int *hwirq); -extern void pnv_ocxl_unmap_xsl_regs(void __iomem *dsisr, void __iomem *dar, - void __iomem *tfc, void __iomem *pe_handle); -extern int pnv_ocxl_map_xsl_regs(struct pci_dev *dev, void __iomem **dsisr, - void __iomem **dar, void __iomem **tfc, - void __iomem **pe_handle); - -extern int pnv_ocxl_spa_setup(struct pci_dev *dev, void *spa_mem, int PE_mask, - void **platform_data); -extern void pnv_ocxl_spa_release(void *platform_data); -extern int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle); - -extern int pnv_ocxl_alloc_xive_irq(u32 *irq, u64 *trigger_addr); -extern void pnv_ocxl_free_xive_irq(u32 irq); +int pnv_ocxl_set_tl_conf(struct pci_dev *dev, long cap, + uint64_t rate_buf_phys, int rate_buf_size); + +int pnv_ocxl_get_xsl_irq(struct pci_dev *dev, int *hwirq); +void pnv_ocxl_unmap_xsl_regs(void __iomem *dsisr, void __iomem *dar, + void __iomem *tfc, void __iomem *pe_handle); +int pnv_ocxl_map_xsl_regs(struct pci_dev *dev, void __iomem **dsisr, + void __iomem **dar, void __iomem **tfc, + void __iomem **pe_handle); + +int pnv_ocxl_spa_setup(struct pci_dev *dev, void *spa_mem, int PE_mask, void **platform_data); +void pnv_ocxl_spa_release(void *platform_data); +int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle); + +int pnv_ocxl_alloc_xive_irq(u32 *irq, u64 *trigger_addr); +void pnv_ocxl_free_xive_irq(u32 irq); #endif /* _ASM_PNV_OCXL_H */ diff --git a/include/misc/ocxl.h b/include/misc/ocxl.h index 06dd5839e438..0a762e387418 100644 --- a/include/misc/ocxl.h +++ b/include/misc/ocxl.h @@ -173,7 +173,7 @@ int ocxl_context_detach(struct ocxl_context *ctx); * * Returns 0 on success, negative on failure */ -extern int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id); +int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id); /** * 
Frees an IRQ associated with an AFU context @@ -182,7 +182,7 @@ extern int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id); * * Returns 0 on success, negative on failure */ -extern int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id); +int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id); /** * Gets the address of the trigger page for an IRQ @@ -193,7 +193,7 @@ extern int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id); * * returns the trigger page address, or 0 if the IRQ is not valid */ -extern u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id); +u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id); /** * Provide a callback to be called when an IRQ is triggered -- cgit v1.2.3 From 3591538a31af37cf6a2d83f1da99e651a822af8b Mon Sep 17 00:00:00 2001 From: Alastair D'Silva Date: Wed, 15 Apr 2020 11:23:43 +1000 Subject: ocxl: Address kernel doc errors & warnings This patch addresses warnings and errors from the kernel doc scripts for the OpenCAPI driver. It also makes minor tweaks to make the docs more consistent. Signed-off-by: Alastair D'Silva Acked-by: Andrew Donnellan Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200415012343.919255-3-alastair@d-silva.org --- drivers/misc/ocxl/config.c | 24 +++++----- drivers/misc/ocxl/ocxl_internal.h | 9 ++-- include/misc/ocxl.h | 96 ++++++++++++++++----------------------- 3 files changed, 55 insertions(+), 74 deletions(-) diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c index 42f7a1298775..4d490b92d951 100644 --- a/drivers/misc/ocxl/config.c +++ b/drivers/misc/ocxl/config.c @@ -344,16 +344,16 @@ static int read_afu_info(struct pci_dev *dev, struct ocxl_fn_config *fn, } /** - * Read the template version from the AFU - * dev: the device for the AFU - * fn: the AFU offsets - * len: outputs the template length - * version: outputs the major<<8,minor version + * read_template_version() - Read the template version from the AFU + * @dev: the device for the AFU + * @fn: the AFU offsets + * @len: outputs the template length + * @version: outputs the major<<8,minor version * * Returns 0 on success, negative on failure */ static int read_template_version(struct pci_dev *dev, struct ocxl_fn_config *fn, - u16 *len, u16 *version) + u16 *len, u16 *version) { u32 val32; u8 major, minor; @@ -547,16 +547,16 @@ static int validate_afu(struct pci_dev *dev, struct ocxl_afu_config *afu) } /** - * Populate AFU metadata regarding LPC memory - * dev: the device for the AFU - * fn: the AFU offsets - * afu: the AFU struct to populate the LPC metadata into + * read_afu_lpc_memory_info() - Populate AFU metadata regarding LPC memory + * @dev: the device for the AFU + * @fn: the AFU offsets + * @afu: the AFU struct to populate the LPC metadata into * * Returns 0 on success, negative on failure */ static int read_afu_lpc_memory_info(struct pci_dev *dev, - struct ocxl_fn_config *fn, - struct ocxl_afu_config *afu) + struct ocxl_fn_config *fn, + struct ocxl_afu_config *afu) { int rc; u32 val32; diff --git a/drivers/misc/ocxl/ocxl_internal.h b/drivers/misc/ocxl/ocxl_internal.h index af9a84aeee6f..0bad0a123af6 100644 --- a/drivers/misc/ocxl/ocxl_internal.h +++ b/drivers/misc/ocxl/ocxl_internal.h @@ -128,11 +128,12 @@ int ocxl_config_check_afu_index(struct pci_dev *dev, struct ocxl_fn_config *fn, int afu_idx); /** - * Update values within a Process Element + * ocxl_link_update_pe() - Update values within a Process Element + * @link_handle: the link handle associated with the process element + * 
@pasid: the PASID for the AFU context + * @tid: the new thread id for the process element * - * link_handle: the link handle associated with the process element - * pasid: the PASID for the AFU context - * tid: the new thread id for the process element + * Returns 0 on success */ int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid); diff --git a/include/misc/ocxl.h b/include/misc/ocxl.h index 0a762e387418..357ef1aadbc0 100644 --- a/include/misc/ocxl.h +++ b/include/misc/ocxl.h @@ -62,8 +62,7 @@ struct ocxl_context; // Device detection & initialisation /** - * Open an OpenCAPI function on an OpenCAPI device - * + * ocxl_function_open() - Open an OpenCAPI function on an OpenCAPI device * @dev: The PCI device that contains the function * * Returns an opaque pointer to the function, or an error pointer (check with IS_ERR) @@ -71,8 +70,7 @@ struct ocxl_context; struct ocxl_fn *ocxl_function_open(struct pci_dev *dev); /** - * Get the list of AFUs associated with a PCI function device - * + * ocxl_function_afu_list() - Get the list of AFUs associated with a PCI function device * Returns a list of struct ocxl_afu * * * @fn: The OpenCAPI function containing the AFUs @@ -80,8 +78,7 @@ struct ocxl_fn *ocxl_function_open(struct pci_dev *dev); struct list_head *ocxl_function_afu_list(struct ocxl_fn *fn); /** - * Fetch an AFU instance from an OpenCAPI function - * + * ocxl_function_fetch_afu() - Fetch an AFU instance from an OpenCAPI function * @fn: The OpenCAPI function to get the AFU from * @afu_idx: The index of the AFU to get * @@ -92,23 +89,20 @@ struct list_head *ocxl_function_afu_list(struct ocxl_fn *fn); struct ocxl_afu *ocxl_function_fetch_afu(struct ocxl_fn *fn, u8 afu_idx); /** - * Take a reference to an AFU - * + * ocxl_afu_get() - Take a reference to an AFU * @afu: The AFU to increment the reference count on */ void ocxl_afu_get(struct ocxl_afu *afu); /** - * Release a reference to an AFU - * + * ocxl_afu_put() - Release a reference to an AFU * @afu: The AFU to decrement the reference count on */ void ocxl_afu_put(struct ocxl_afu *afu); /** - * Get the configuration information for an OpenCAPI function - * + * ocxl_function_config() - Get the configuration information for an OpenCAPI function * @fn: The OpenCAPI function to get the config for * * Returns the function config, or NULL on error @@ -116,8 +110,7 @@ void ocxl_afu_put(struct ocxl_afu *afu); const struct ocxl_fn_config *ocxl_function_config(struct ocxl_fn *fn); /** - * Close an OpenCAPI function - * + * ocxl_function_close() - Close an OpenCAPI function * This will free any AFUs previously retrieved from the function, and * detach and associated contexts. The contexts must by freed by the caller. 
* @@ -129,8 +122,7 @@ void ocxl_function_close(struct ocxl_fn *fn); // Context allocation /** - * Allocate an OpenCAPI context - * + * ocxl_context_alloc() - Allocate an OpenCAPI context * @context: The OpenCAPI context to allocate, must be freed with ocxl_context_free * @afu: The AFU the context belongs to * @mapping: The mapping to unmap when the context is closed (may be NULL) @@ -139,14 +131,13 @@ int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu, struct address_space *mapping); /** - * Free an OpenCAPI context - * + * ocxl_context_free() - Free an OpenCAPI context * @ctx: The OpenCAPI context to free */ void ocxl_context_free(struct ocxl_context *ctx); /** - * Grant access to an MM to an OpenCAPI context + * ocxl_context_attach() - Grant access to an MM to an OpenCAPI context * @ctx: The OpenCAPI context to attach * @amr: The value of the AMR register to restrict access * @mm: The mm to attach to the context @@ -157,7 +148,7 @@ int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm); /** - * Detach an MM from an OpenCAPI context + * ocxl_context_detach() - Detach an MM from an OpenCAPI context * @ctx: The OpenCAPI context to attach * * Returns 0 on success, negative on failure @@ -167,7 +158,7 @@ int ocxl_context_detach(struct ocxl_context *ctx); // AFU IRQs /** - * Allocate an IRQ associated with an AFU context + * ocxl_afu_irq_alloc() - Allocate an IRQ associated with an AFU context * @ctx: the AFU context * @irq_id: out, the IRQ ID * @@ -176,7 +167,7 @@ int ocxl_context_detach(struct ocxl_context *ctx); int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id); /** - * Frees an IRQ associated with an AFU context + * ocxl_afu_irq_free() - Frees an IRQ associated with an AFU context * @ctx: the AFU context * @irq_id: the IRQ ID * @@ -185,7 +176,7 @@ int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id); int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id); /** - * Gets the address of the trigger page for an IRQ + * ocxl_afu_irq_get_addr() - Gets the address of the trigger page for an IRQ * This can then be provided to an AFU which will write to that * page to trigger the IRQ. * @ctx: The AFU context that the IRQ is associated with @@ -196,7 +187,7 @@ int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id); u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id); /** - * Provide a callback to be called when an IRQ is triggered + * ocxl_irq_set_handler() - Provide a callback to be called when an IRQ is triggered * @ctx: The AFU context that the IRQ is associated with * @irq_id: The IRQ ID * @handler: the callback to be called when the IRQ is triggered @@ -213,8 +204,7 @@ int ocxl_irq_set_handler(struct ocxl_context *ctx, int irq_id, // AFU Metadata /** - * Get a pointer to the config for an AFU - * + * ocxl_afu_config() - Get a pointer to the config for an AFU * @afu: a pointer to the AFU to get the config for * * Returns a pointer to the AFU config @@ -222,27 +212,24 @@ int ocxl_irq_set_handler(struct ocxl_context *ctx, int irq_id, struct ocxl_afu_config *ocxl_afu_config(struct ocxl_afu *afu); /** - * Assign opaque hardware specific information to an OpenCAPI AFU. - * - * @dev: The PCI device associated with the OpenCAPI device + * ocxl_afu_set_private() - Assign opaque hardware specific information to an OpenCAPI AFU. 
+ * @afu: The OpenCAPI AFU * @private: the opaque hardware specific information to assign to the driver */ void ocxl_afu_set_private(struct ocxl_afu *afu, void *private); /** - * Fetch the hardware specific information associated with an external OpenCAPI - * AFU. This may be consumed by an external OpenCAPI driver. - * - * @afu: The AFU + * ocxl_afu_get_private() - Fetch the hardware specific information associated with + * an external OpenCAPI AFU. This may be consumed by an external OpenCAPI driver. + * @afu: The OpenCAPI AFU * * Returns the opaque pointer associated with the device, or NULL if not set */ -void *ocxl_afu_get_private(struct ocxl_afu *dev); +void *ocxl_afu_get_private(struct ocxl_afu *afu); // Global MMIO /** - * Read a 32 bit value from global MMIO - * + * ocxl_global_mmio_read32() - Read a 32 bit value from global MMIO * @afu: The AFU * @offset: The Offset from the start of MMIO * @endian: the endianness that the MMIO data is in @@ -251,11 +238,10 @@ void *ocxl_afu_get_private(struct ocxl_afu *dev); * Returns 0 for success, negative on error */ int ocxl_global_mmio_read32(struct ocxl_afu *afu, size_t offset, - enum ocxl_endian endian, u32 *val); + enum ocxl_endian endian, u32 *val); /** - * Read a 64 bit value from global MMIO - * + * ocxl_global_mmio_read64() - Read a 64 bit value from global MMIO * @afu: The AFU * @offset: The Offset from the start of MMIO * @endian: the endianness that the MMIO data is in @@ -264,11 +250,10 @@ int ocxl_global_mmio_read32(struct ocxl_afu *afu, size_t offset, * Returns 0 for success, negative on error */ int ocxl_global_mmio_read64(struct ocxl_afu *afu, size_t offset, - enum ocxl_endian endian, u64 *val); + enum ocxl_endian endian, u64 *val); /** - * Write a 32 bit value to global MMIO - * + * ocxl_global_mmio_write32() - Write a 32 bit value to global MMIO * @afu: The AFU * @offset: The Offset from the start of MMIO * @endian: the endianness that the MMIO data is in @@ -277,11 +262,10 @@ int ocxl_global_mmio_read64(struct ocxl_afu *afu, size_t offset, * Returns 0 for success, negative on error */ int ocxl_global_mmio_write32(struct ocxl_afu *afu, size_t offset, - enum ocxl_endian endian, u32 val); + enum ocxl_endian endian, u32 val); /** - * Write a 64 bit value to global MMIO - * + * ocxl_global_mmio_write64() - Write a 64 bit value to global MMIO * @afu: The AFU * @offset: The Offset from the start of MMIO * @endian: the endianness that the MMIO data is in @@ -290,11 +274,10 @@ int ocxl_global_mmio_write32(struct ocxl_afu *afu, size_t offset, * Returns 0 for success, negative on error */ int ocxl_global_mmio_write64(struct ocxl_afu *afu, size_t offset, - enum ocxl_endian endian, u64 val); + enum ocxl_endian endian, u64 val); /** - * Set bits in a 32 bit global MMIO register - * + * ocxl_global_mmio_set32() - Set bits in a 32 bit global MMIO register * @afu: The AFU * @offset: The Offset from the start of MMIO * @endian: the endianness that the MMIO data is in @@ -303,11 +286,10 @@ int ocxl_global_mmio_write64(struct ocxl_afu *afu, size_t offset, * Returns 0 for success, negative on error */ int ocxl_global_mmio_set32(struct ocxl_afu *afu, size_t offset, - enum ocxl_endian endian, u32 mask); + enum ocxl_endian endian, u32 mask); /** - * Set bits in a 64 bit global MMIO register - * + * ocxl_global_mmio_set64() - Set bits in a 64 bit global MMIO register * @afu: The AFU * @offset: The Offset from the start of MMIO * @endian: the endianness that the MMIO data is in @@ -316,11 +298,10 @@ int ocxl_global_mmio_set32(struct ocxl_afu *afu, 
size_t offset, * Returns 0 for success, negative on error */ int ocxl_global_mmio_set64(struct ocxl_afu *afu, size_t offset, - enum ocxl_endian endian, u64 mask); + enum ocxl_endian endian, u64 mask); /** - * Set bits in a 32 bit global MMIO register - * + * ocxl_global_mmio_clear32() - Set bits in a 32 bit global MMIO register * @afu: The AFU * @offset: The Offset from the start of MMIO * @endian: the endianness that the MMIO data is in @@ -329,11 +310,10 @@ int ocxl_global_mmio_set64(struct ocxl_afu *afu, size_t offset, * Returns 0 for success, negative on error */ int ocxl_global_mmio_clear32(struct ocxl_afu *afu, size_t offset, - enum ocxl_endian endian, u32 mask); + enum ocxl_endian endian, u32 mask); /** - * Set bits in a 64 bit global MMIO register - * + * ocxl_global_mmio_clear64() - Set bits in a 64 bit global MMIO register * @afu: The AFU * @offset: The Offset from the start of MMIO * @endian: the endianness that the MMIO data is in @@ -342,7 +322,7 @@ int ocxl_global_mmio_clear32(struct ocxl_afu *afu, size_t offset, * Returns 0 for success, negative on error */ int ocxl_global_mmio_clear64(struct ocxl_afu *afu, size_t offset, - enum ocxl_endian endian, u64 mask); + enum ocxl_endian endian, u64 mask); // Functions left here are for compatibility with the cxlflash driver -- cgit v1.2.3 From d3c61954fc1827df571e235b9a98e10108ef5c3d Mon Sep 17 00:00:00 2001 From: David Lamparter Date: Fri, 21 Sep 2018 01:04:22 +0200 Subject: powerpc/fsl/dts: add missing P4080DS I2C devices This just adds the zl2006 voltage regulators / power monitors and the onboard I2C eeproms. The ICS9FG108 clock chip doesn't seem to have a driver, so it is left in the DTS as a comment. And for good measure, the SPD eeproms are tagged as such. Signed-off-by: David Lamparter Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20180920230422.GK487685@eidolon.nox.tf --- arch/powerpc/boot/dts/fsl/p4080ds.dts | 43 +++++++++++++++++++++++++++++++++-- 1 file changed, 41 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/boot/dts/fsl/p4080ds.dts b/arch/powerpc/boot/dts/fsl/p4080ds.dts index 65e20152e22f..969b32c4f2d5 100644 --- a/arch/powerpc/boot/dts/fsl/p4080ds.dts +++ b/arch/powerpc/boot/dts/fsl/p4080ds.dts @@ -125,11 +125,11 @@ i2c@118100 { eeprom@51 { - compatible = "atmel,24c256"; + compatible = "atmel,spd"; reg = <0x51>; }; eeprom@52 { - compatible = "atmel,24c256"; + compatible = "atmel,spd"; reg = <0x52>; }; rtc@68 { @@ -143,6 +143,45 @@ }; }; + i2c@118000 { + zl2006@21 { + compatible = "zl2006"; + reg = <0x21>; + }; + zl2006@22 { + compatible = "zl2006"; + reg = <0x22>; + }; + zl2006@23 { + compatible = "zl2006"; + reg = <0x23>; + }; + zl2006@24 { + compatible = "zl2006"; + reg = <0x24>; + }; + eeprom@50 { + compatible = "atmel,24c64"; + reg = <0x50>; + }; + eeprom@55 { + compatible = "atmel,24c64"; + reg = <0x55>; + }; + eeprom@56 { + compatible = "atmel,24c64"; + reg = <0x56>; + }; + eeprom@57 { + compatible = "atmel,24c02"; + reg = <0x57>; + }; + }; + + i2c@119100 { + /* 0x6E: ICS9FG108 */ + }; + usb0: usb@210000 { phy_type = "ulpi"; }; -- cgit v1.2.3 From ada68a66b72687e6b74e35c42efd1783e84b01fd Mon Sep 17 00:00:00 2001 From: Mahesh Salgaonkar Date: Tue, 23 Jun 2020 15:57:50 +0530 Subject: powerpc/64s: Move HMI IRQ stat from percpu variable to paca. With the proposed change in percpu bootmem allocator to use page mapping [1], the percpu first chunk memory area can come from vmalloc ranges. 
This makes the HMI (Hypervisor Maintenance Interrupt) handler crash the kernel whenever percpu variable is accessed in real mode. This patch fixes this issue by moving the HMI IRQ stat inside paca for safe access in realmode. [1] https://lore.kernel.org/linuxppc-dev/20200608070904.387440-1-aneesh.kumar@linux.ibm.com/ Suggested-by: Aneesh Kumar K.V Signed-off-by: Mahesh Salgaonkar Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/159290806973.3642154.5244613424529764050.stgit@jupiter --- arch/powerpc/include/asm/hardirq.h | 1 - arch/powerpc/include/asm/paca.h | 1 + arch/powerpc/kernel/irq.c | 9 ++++++--- arch/powerpc/kernel/mce.c | 2 +- arch/powerpc/kvm/book3s_hv_ras.c | 2 +- 5 files changed, 9 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h index f1e9067bd5ac..f133b5930ae1 100644 --- a/arch/powerpc/include/asm/hardirq.h +++ b/arch/powerpc/include/asm/hardirq.h @@ -13,7 +13,6 @@ typedef struct { unsigned int pmu_irqs; unsigned int mce_exceptions; unsigned int spurious_irqs; - unsigned int hmi_exceptions; unsigned int sreset_irqs; #ifdef CONFIG_PPC_WATCHDOG unsigned int soft_nmi_irqs; diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 45a839a7c6cf..cc07c399306e 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -225,6 +225,7 @@ struct paca_struct { u16 in_mce; u8 hmi_event_available; /* HMI event is available */ u8 hmi_p9_special_emu; /* HMI P9 special emulation */ + u32 hmi_irqs; /* HMI irq stat */ #endif u8 ftrace_enabled; /* Hard disable ftrace */ diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 05b1cc0e009e..bf21ebd36190 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -621,13 +621,14 @@ int arch_show_interrupts(struct seq_file *p, int prec) seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions); seq_printf(p, " Machine check exceptions\n"); +#ifdef CONFIG_PPC_BOOK3S_64 if (cpu_has_feature(CPU_FTR_HVMODE)) { seq_printf(p, "%*s: ", prec, "HMI"); for_each_online_cpu(j) - seq_printf(p, "%10u ", - per_cpu(irq_stat, j).hmi_exceptions); + seq_printf(p, "%10u ", paca_ptrs[j]->hmi_irqs); seq_printf(p, " Hypervisor Maintenance Interrupts\n"); } +#endif seq_printf(p, "%*s: ", prec, "NMI"); for_each_online_cpu(j) @@ -665,7 +666,9 @@ u64 arch_irq_stat_cpu(unsigned int cpu) sum += per_cpu(irq_stat, cpu).mce_exceptions; sum += per_cpu(irq_stat, cpu).spurious_irqs; sum += per_cpu(irq_stat, cpu).timer_irqs_others; - sum += per_cpu(irq_stat, cpu).hmi_exceptions; +#ifdef CONFIG_PPC_BOOK3S_64 + sum += paca_ptrs[cpu]->hmi_irqs; +#endif sum += per_cpu(irq_stat, cpu).sreset_irqs; #ifdef CONFIG_PPC_WATCHDOG sum += per_cpu(irq_stat, cpu).soft_nmi_irqs; diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index c2fa8ee63720..ada59f6c4298 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -727,7 +727,7 @@ long hmi_exception_realmode(struct pt_regs *regs) { int ret; - __this_cpu_inc(irq_stat.hmi_exceptions); + local_paca->hmi_irqs++; ret = hmi_handle_debugtrig(regs); if (ret >= 0) diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c index 79f7d07ef674..6028628ea3ac 100644 --- a/arch/powerpc/kvm/book3s_hv_ras.c +++ b/arch/powerpc/kvm/book3s_hv_ras.c @@ -244,7 +244,7 @@ long kvmppc_realmode_hmi_handler(void) { bool resync_req; - __this_cpu_inc(irq_stat.hmi_exceptions); + local_paca->hmi_irqs++; if (hmi_handle_debugtrig(NULL) >= 0) return 1; -- cgit 
v1.2.3 From fdaa7ce2016ccd09a538b05bace5f4479662ddcb Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Wed, 29 Jul 2020 14:08:28 +1000 Subject: powerpc/configs: Add BLK_DEV_NVME to pseries_defconfig I've forgotten to manually enable NVME when building pseries kernels for machines with NVME adapters. Since it's a reasonably common configuration, enable it by default. Signed-off-by: Anton Blanchard Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200729040828.2312966-1-anton@ozlabs.org --- arch/powerpc/configs/pseries_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index efd5398928d5..d5dece981c02 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -93,6 +93,7 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=65536 CONFIG_VIRTIO_BLK=m +CONFIG_BLK_DEV_NVME=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -- cgit v1.2.3 From f891f19736bdf404845f97d8038054be37160ea8 Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Wed, 29 Jul 2020 17:09:19 +0530 Subject: kexec_file: Allow archs to handle special regions while locating memory hole Some architectures may have special memory regions, within the given memory range, which can't be used for the buffer in a kexec segment. Implement weak arch_kexec_locate_mem_hole() definition which arch code may override, to take care of special regions, while trying to locate a memory hole. Also, add the missing declarations for arch overridable functions and and drop the __weak descriptors in the declarations to avoid non-weak definitions from becoming weak. Signed-off-by: Hari Bathini Tested-by: Pingfan Liu Reviewed-by: Thiago Jung Bauermann Acked-by: Dave Young Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/159602273603.575379.17665852963340380839.stgit@hbathini --- include/linux/kexec.h | 29 ++++++++++++++++++----------- kernel/kexec_file.c | 16 ++++++++++++++-- 2 files changed, 32 insertions(+), 13 deletions(-) diff --git a/include/linux/kexec.h b/include/linux/kexec.h index ea67910ae6b7..9e93bef52968 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -183,17 +183,24 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, bool get_value); void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name); -int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf, - unsigned long buf_len); -void * __weak arch_kexec_kernel_image_load(struct kimage *image); -int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi, - Elf_Shdr *section, - const Elf_Shdr *relsec, - const Elf_Shdr *symtab); -int __weak arch_kexec_apply_relocations(struct purgatory_info *pi, - Elf_Shdr *section, - const Elf_Shdr *relsec, - const Elf_Shdr *symtab); +/* Architectures may override the below functions */ +int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, + unsigned long buf_len); +void *arch_kexec_kernel_image_load(struct kimage *image); +int arch_kexec_apply_relocations_add(struct purgatory_info *pi, + Elf_Shdr *section, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab); +int arch_kexec_apply_relocations(struct purgatory_info *pi, + Elf_Shdr *section, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab); +int arch_kimage_file_post_load_cleanup(struct kimage *image); +#ifdef CONFIG_KEXEC_SIG +int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, + unsigned long buf_len); +#endif 
+int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf); extern int kexec_add_buffer(struct kexec_buf *kbuf); int kexec_locate_mem_hole(struct kexec_buf *kbuf); diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index bb05fd52de85..eb42d2efa16a 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -657,6 +657,19 @@ int kexec_locate_mem_hole(struct kexec_buf *kbuf) return ret == 1 ? 0 : -EADDRNOTAVAIL; } +/** + * arch_kexec_locate_mem_hole - Find free memory to place the segments. + * @kbuf: Parameters for the memory search. + * + * On success, kbuf->mem will have the start address of the memory region found. + * + * Return: 0 on success, negative errno on error. + */ +int __weak arch_kexec_locate_mem_hole(struct kexec_buf *kbuf) +{ + return kexec_locate_mem_hole(kbuf); +} + /** * kexec_add_buffer - place a buffer in a kexec segment * @kbuf: Buffer contents and memory parameters. @@ -669,7 +682,6 @@ int kexec_locate_mem_hole(struct kexec_buf *kbuf) */ int kexec_add_buffer(struct kexec_buf *kbuf) { - struct kexec_segment *ksegment; int ret; @@ -697,7 +709,7 @@ int kexec_add_buffer(struct kexec_buf *kbuf) kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE); /* Walk the RAM ranges and allocate a suitable range for the buffer */ - ret = kexec_locate_mem_hole(kbuf); + ret = arch_kexec_locate_mem_hole(kbuf); if (ret) return ret; -- cgit v1.2.3 From 19031275a5881233b4fc31b7dee68bf0b0758bbc Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Wed, 29 Jul 2020 17:09:41 +0530 Subject: powerpc/kexec_file: Mark PPC64 specific code Some of the kexec_file_load code isn't PPC64 specific. Move PPC64 specific code from kexec/file_load.c to kexec/file_load_64.c. Also, rename purgatory/trampoline.S to purgatory/trampoline_64.S in the same spirit. No functional changes. 
Signed-off-by: Hari Bathini Tested-by: Pingfan Liu Reviewed-by: Laurent Dufour Reviewed-by: Thiago Jung Bauermann Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/159602276920.575379.10390965946438306388.stgit@hbathini --- arch/powerpc/include/asm/kexec.h | 9 +++ arch/powerpc/kexec/Makefile | 2 +- arch/powerpc/kexec/elf_64.c | 7 +- arch/powerpc/kexec/file_load.c | 19 +----- arch/powerpc/kexec/file_load_64.c | 87 ++++++++++++++++++++++++ arch/powerpc/purgatory/Makefile | 4 +- arch/powerpc/purgatory/trampoline.S | 117 --------------------------------- arch/powerpc/purgatory/trampoline_64.S | 117 +++++++++++++++++++++++++++++++++ 8 files changed, 222 insertions(+), 140 deletions(-) create mode 100644 arch/powerpc/kexec/file_load_64.c delete mode 100644 arch/powerpc/purgatory/trampoline.S create mode 100644 arch/powerpc/purgatory/trampoline_64.S diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index 89f7e3462292..9b3feef55aaa 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -116,6 +116,15 @@ int setup_new_fdt(const struct kimage *image, void *fdt, unsigned long initrd_load_addr, unsigned long initrd_len, const char *cmdline); int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size); + +#ifdef CONFIG_PPC64 +int setup_purgatory_ppc64(struct kimage *image, const void *slave_code, + const void *fdt, unsigned long kernel_load_addr, + unsigned long fdt_load_addr); +int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, + unsigned long initrd_load_addr, + unsigned long initrd_len, const char *cmdline); +#endif /* CONFIG_PPC64 */ #endif /* CONFIG_KEXEC_FILE */ #else /* !CONFIG_KEXEC_CORE */ diff --git a/arch/powerpc/kexec/Makefile b/arch/powerpc/kexec/Makefile index 86380c69f5ce..67c355329457 100644 --- a/arch/powerpc/kexec/Makefile +++ b/arch/powerpc/kexec/Makefile @@ -7,7 +7,7 @@ obj-y += core.o crash.o core_$(BITS).o obj-$(CONFIG_PPC32) += relocate_32.o -obj-$(CONFIG_KEXEC_FILE) += file_load.o elf_$(BITS).o +obj-$(CONFIG_KEXEC_FILE) += file_load.o file_load_$(BITS).o elf_$(BITS).o ifdef CONFIG_HAVE_IMA_KEXEC ifdef CONFIG_IMA diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c index 3072fd6dbe94..23ad04ccaf8e 100644 --- a/arch/powerpc/kexec/elf_64.c +++ b/arch/powerpc/kexec/elf_64.c @@ -88,7 +88,8 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, goto out; } - ret = setup_new_fdt(image, fdt, initrd_load_addr, initrd_len, cmdline); + ret = setup_new_fdt_ppc64(image, fdt, initrd_load_addr, + initrd_len, cmdline); if (ret) goto out; @@ -107,8 +108,8 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, pr_debug("Loaded device tree at 0x%lx\n", fdt_load_addr); slave_code = elf_info.buffer + elf_info.proghdrs[0].p_offset; - ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr, - fdt_load_addr); + ret = setup_purgatory_ppc64(image, slave_code, fdt, kernel_load_addr, + fdt_load_addr); if (ret) pr_err("Error setting up the purgatory.\n"); diff --git a/arch/powerpc/kexec/file_load.c b/arch/powerpc/kexec/file_load.c index 143c91724617..38439aba27d7 100644 --- a/arch/powerpc/kexec/file_load.c +++ b/arch/powerpc/kexec/file_load.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * ppc64 code to implement the kexec_file_load syscall + * powerpc code to implement the kexec_file_load syscall * * Copyright (C) 2004 Adam Litke (agl@us.ibm.com) * Copyright (C) 2004 IBM Corp. 
@@ -20,22 +20,7 @@ #include #include -#define SLAVE_CODE_SIZE 256 - -const struct kexec_file_ops * const kexec_file_loaders[] = { - &kexec_elf64_ops, - NULL -}; - -int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, - unsigned long buf_len) -{ - /* We don't support crash kernels yet. */ - if (image->type == KEXEC_TYPE_CRASH) - return -EOPNOTSUPP; - - return kexec_image_probe_default(image, buf, buf_len); -} +#define SLAVE_CODE_SIZE 256 /* First 0x100 bytes */ /** * setup_purgatory - initialize the purgatory's global variables diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c new file mode 100644 index 000000000000..3e9ac5f216b0 --- /dev/null +++ b/arch/powerpc/kexec/file_load_64.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ppc64 code to implement the kexec_file_load syscall + * + * Copyright (C) 2004 Adam Litke (agl@us.ibm.com) + * Copyright (C) 2004 IBM Corp. + * Copyright (C) 2004,2005 Milton D Miller II, IBM Corporation + * Copyright (C) 2005 R Sharada (sharada@in.ibm.com) + * Copyright (C) 2006 Mohan Kumar M (mohan@in.ibm.com) + * Copyright (C) 2020 IBM Corporation + * + * Based on kexec-tools' kexec-ppc64.c, kexec-elf-rel-ppc64.c, fs2dt.c. + * Heavily modified for the kernel by + * Hari Bathini, IBM Corporation. + */ + +#include +#include +#include + +const struct kexec_file_ops * const kexec_file_loaders[] = { + &kexec_elf64_ops, + NULL +}; + +/** + * setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global + * variables and call setup_purgatory() to initialize + * common global variable. + * @image: kexec image. + * @slave_code: Slave code for the purgatory. + * @fdt: Flattened device tree for the next kernel. + * @kernel_load_addr: Address where the kernel is loaded. + * @fdt_load_addr: Address where the flattened device tree is loaded. + * + * Returns 0 on success, negative errno on error. + */ +int setup_purgatory_ppc64(struct kimage *image, const void *slave_code, + const void *fdt, unsigned long kernel_load_addr, + unsigned long fdt_load_addr) +{ + int ret; + + ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr, + fdt_load_addr); + if (ret) + pr_err("Failed to setup purgatory symbols"); + return ret; +} + +/** + * setup_new_fdt_ppc64 - Update the flattend device-tree of the kernel + * being loaded. + * @image: kexec image being loaded. + * @fdt: Flattened device tree for the next kernel. + * @initrd_load_addr: Address where the next initrd will be loaded. + * @initrd_len: Size of the next initrd, or 0 if there will be none. + * @cmdline: Command line for the next kernel, or NULL if there will + * be none. + * + * Returns 0 on success, negative errno on error. + */ +int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, + unsigned long initrd_load_addr, + unsigned long initrd_len, const char *cmdline) +{ + return setup_new_fdt(image, fdt, initrd_load_addr, initrd_len, cmdline); +} + +/** + * arch_kexec_kernel_image_probe - Does additional handling needed to setup + * kexec segments. + * @image: kexec image being loaded. + * @buf: Buffer pointing to elf data. + * @buf_len: Length of the buffer. + * + * Returns 0 on success, negative errno on error. + */ +int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, + unsigned long buf_len) +{ + /* We don't support crash kernels yet. 
*/ + if (image->type == KEXEC_TYPE_CRASH) + return -EOPNOTSUPP; + + return kexec_image_probe_default(image, buf, buf_len); +} diff --git a/arch/powerpc/purgatory/Makefile b/arch/powerpc/purgatory/Makefile index 7c6d8b14f440..348f59581052 100644 --- a/arch/powerpc/purgatory/Makefile +++ b/arch/powerpc/purgatory/Makefile @@ -2,11 +2,11 @@ KASAN_SANITIZE := n -targets += trampoline.o purgatory.ro kexec-purgatory.c +targets += trampoline_$(BITS).o purgatory.ro kexec-purgatory.c LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -$(obj)/purgatory.ro: $(obj)/trampoline.o FORCE +$(obj)/purgatory.ro: $(obj)/trampoline_$(BITS).o FORCE $(call if_changed,ld) quiet_cmd_bin2c = BIN2C $@ diff --git a/arch/powerpc/purgatory/trampoline.S b/arch/powerpc/purgatory/trampoline.S deleted file mode 100644 index a5a83c3f53e6..000000000000 --- a/arch/powerpc/purgatory/trampoline.S +++ /dev/null @@ -1,117 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * kexec trampoline - * - * Based on code taken from kexec-tools and kexec-lite. - * - * Copyright (C) 2004 - 2005, Milton D Miller II, IBM Corporation - * Copyright (C) 2006, Mohan Kumar M, IBM Corporation - * Copyright (C) 2013, Anton Blanchard, IBM Corporation - */ - -#include - - .machine ppc64 - .balign 256 - .globl purgatory_start -purgatory_start: - b master - - /* ABI: possible run_at_load flag at 0x5c */ - .org purgatory_start + 0x5c - .globl run_at_load -run_at_load: - .long 0 - .size run_at_load, . - run_at_load - - /* ABI: slaves start at 60 with r3=phys */ - .org purgatory_start + 0x60 -slave: - b . - /* ABI: end of copied region */ - .org purgatory_start + 0x100 - .size purgatory_start, . - purgatory_start - -/* - * The above 0x100 bytes at purgatory_start are replaced with the - * code from the kernel (or next stage) by setup_purgatory(). - */ - -master: - or %r1,%r1,%r1 /* low priority to let other threads catchup */ - isync - mr %r17,%r3 /* save cpu id to r17 */ - mr %r15,%r4 /* save physical address in reg15 */ - - or %r3,%r3,%r3 /* ok now to high priority, lets boot */ - lis %r6,0x1 - mtctr %r6 /* delay a bit for slaves to catch up */ - bdnz . /* before we overwrite 0-100 again */ - - bl 0f /* Work out where we're running */ -0: mflr %r18 - - /* load device-tree address */ - ld %r3, (dt_offset - 0b)(%r18) - mr %r16,%r3 /* save dt address in reg16 */ - li %r4,20 - LWZX_BE %r6,%r3,%r4 /* fetch __be32 version number at byte 20 */ - cmpwi %cr0,%r6,2 /* v2 or later? */ - blt 1f - li %r4,28 - STWX_BE %r17,%r3,%r4 /* Store my cpu as __be32 at byte 28 */ -1: - /* load the kernel address */ - ld %r4,(kernel - 0b)(%r18) - - /* load the run_at_load flag */ - /* possibly patched by kexec */ - ld %r6,(run_at_load - 0b)(%r18) - /* and patch it into the kernel */ - stw %r6,(0x5c)(%r4) - - mr %r3,%r16 /* restore dt address */ - - li %r5,0 /* r5 will be 0 for kernel */ - - mfmsr %r11 - andi. %r10,%r11,1 /* test MSR_LE */ - bne .Little_endian - - mtctr %r4 /* prepare branch to */ - bctr /* start kernel */ - -.Little_endian: - mtsrr0 %r4 /* prepare branch to */ - - clrrdi %r11,%r11,1 /* clear MSR_LE */ - mtsrr1 %r11 - - rfid /* update MSR and start kernel */ - - - .balign 8 - .globl kernel -kernel: - .8byte 0x0 - .size kernel, . - kernel - - .balign 8 - .globl dt_offset -dt_offset: - .8byte 0x0 - .size dt_offset, . - dt_offset - - - .data - .balign 8 -.globl purgatory_sha256_digest -purgatory_sha256_digest: - .skip 32 - .size purgatory_sha256_digest, . 
- purgatory_sha256_digest - - .balign 8 -.globl purgatory_sha_regions -purgatory_sha_regions: - .skip 8 * 2 * 16 - .size purgatory_sha_regions, . - purgatory_sha_regions diff --git a/arch/powerpc/purgatory/trampoline_64.S b/arch/powerpc/purgatory/trampoline_64.S new file mode 100644 index 000000000000..a5a83c3f53e6 --- /dev/null +++ b/arch/powerpc/purgatory/trampoline_64.S @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * kexec trampoline + * + * Based on code taken from kexec-tools and kexec-lite. + * + * Copyright (C) 2004 - 2005, Milton D Miller II, IBM Corporation + * Copyright (C) 2006, Mohan Kumar M, IBM Corporation + * Copyright (C) 2013, Anton Blanchard, IBM Corporation + */ + +#include + + .machine ppc64 + .balign 256 + .globl purgatory_start +purgatory_start: + b master + + /* ABI: possible run_at_load flag at 0x5c */ + .org purgatory_start + 0x5c + .globl run_at_load +run_at_load: + .long 0 + .size run_at_load, . - run_at_load + + /* ABI: slaves start at 60 with r3=phys */ + .org purgatory_start + 0x60 +slave: + b . + /* ABI: end of copied region */ + .org purgatory_start + 0x100 + .size purgatory_start, . - purgatory_start + +/* + * The above 0x100 bytes at purgatory_start are replaced with the + * code from the kernel (or next stage) by setup_purgatory(). + */ + +master: + or %r1,%r1,%r1 /* low priority to let other threads catchup */ + isync + mr %r17,%r3 /* save cpu id to r17 */ + mr %r15,%r4 /* save physical address in reg15 */ + + or %r3,%r3,%r3 /* ok now to high priority, lets boot */ + lis %r6,0x1 + mtctr %r6 /* delay a bit for slaves to catch up */ + bdnz . /* before we overwrite 0-100 again */ + + bl 0f /* Work out where we're running */ +0: mflr %r18 + + /* load device-tree address */ + ld %r3, (dt_offset - 0b)(%r18) + mr %r16,%r3 /* save dt address in reg16 */ + li %r4,20 + LWZX_BE %r6,%r3,%r4 /* fetch __be32 version number at byte 20 */ + cmpwi %cr0,%r6,2 /* v2 or later? */ + blt 1f + li %r4,28 + STWX_BE %r17,%r3,%r4 /* Store my cpu as __be32 at byte 28 */ +1: + /* load the kernel address */ + ld %r4,(kernel - 0b)(%r18) + + /* load the run_at_load flag */ + /* possibly patched by kexec */ + ld %r6,(run_at_load - 0b)(%r18) + /* and patch it into the kernel */ + stw %r6,(0x5c)(%r4) + + mr %r3,%r16 /* restore dt address */ + + li %r5,0 /* r5 will be 0 for kernel */ + + mfmsr %r11 + andi. %r10,%r11,1 /* test MSR_LE */ + bne .Little_endian + + mtctr %r4 /* prepare branch to */ + bctr /* start kernel */ + +.Little_endian: + mtsrr0 %r4 /* prepare branch to */ + + clrrdi %r11,%r11,1 /* clear MSR_LE */ + mtsrr1 %r11 + + rfid /* update MSR and start kernel */ + + + .balign 8 + .globl kernel +kernel: + .8byte 0x0 + .size kernel, . - kernel + + .balign 8 + .globl dt_offset +dt_offset: + .8byte 0x0 + .size dt_offset, . - dt_offset + + + .data + .balign 8 +.globl purgatory_sha256_digest +purgatory_sha256_digest: + .skip 32 + .size purgatory_sha256_digest, . - purgatory_sha256_digest + + .balign 8 +.globl purgatory_sha_regions +purgatory_sha_regions: + .skip 8 * 2 * 16 + .size purgatory_sha_regions, . - purgatory_sha_regions -- cgit v1.2.3 From 180adfc532a83c1d74146449f7385f767d4b8059 Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Wed, 29 Jul 2020 17:10:00 +0530 Subject: powerpc/kexec_file: Add helper functions for getting memory ranges In kexec case, the kernel to be loaded uses the same memory layout as the running kernel. So, passing on the DT of the running kernel would be good enough. 
But in case of kdump, different memory ranges are needed to manage loading the kdump kernel, booting into it and exporting the elfcore of the crashing kernel. The ranges are exclude memory ranges, usable memory ranges, reserved memory ranges and crash memory ranges. Exclude memory ranges specify the list of memory ranges to avoid while loading kdump segments. Usable memory ranges list the memory ranges that could be used for booting kdump kernel. Reserved memory ranges list the memory regions for the loading kernel's reserve map. Crash memory ranges list the memory ranges to be exported as the crashing kernel's elfcore. Add helper functions for setting up the above mentioned memory ranges. This helpers facilitate in understanding the subsequent changes better and make it easy to setup the different memory ranges listed above, as and when appropriate. Signed-off-by: Hari Bathini Tested-by: Pingfan Liu Reviewed-by: Thiago Jung Bauermann Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/159602279194.575379.8526552316948643550.stgit@hbathini --- arch/powerpc/include/asm/kexec_ranges.h | 11 ++ arch/powerpc/kexec/Makefile | 2 +- arch/powerpc/kexec/ranges.c | 235 ++++++++++++++++++++++++++++++++ 3 files changed, 247 insertions(+), 1 deletion(-) create mode 100644 arch/powerpc/include/asm/kexec_ranges.h create mode 100644 arch/powerpc/kexec/ranges.c diff --git a/arch/powerpc/include/asm/kexec_ranges.h b/arch/powerpc/include/asm/kexec_ranges.h new file mode 100644 index 000000000000..35ae31a7a4de --- /dev/null +++ b/arch/powerpc/include/asm/kexec_ranges.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ASM_POWERPC_KEXEC_RANGES_H +#define _ASM_POWERPC_KEXEC_RANGES_H + +#define MEM_RANGE_CHUNK_SZ 2048 /* Memory ranges size chunk */ + +void sort_memory_ranges(struct crash_mem *mrngs, bool merge); +struct crash_mem *realloc_mem_ranges(struct crash_mem **mem_ranges); +int add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size); + +#endif /* _ASM_POWERPC_KEXEC_RANGES_H */ diff --git a/arch/powerpc/kexec/Makefile b/arch/powerpc/kexec/Makefile index 67c355329457..4aff6846c772 100644 --- a/arch/powerpc/kexec/Makefile +++ b/arch/powerpc/kexec/Makefile @@ -7,7 +7,7 @@ obj-y += core.o crash.o core_$(BITS).o obj-$(CONFIG_PPC32) += relocate_32.o -obj-$(CONFIG_KEXEC_FILE) += file_load.o file_load_$(BITS).o elf_$(BITS).o +obj-$(CONFIG_KEXEC_FILE) += file_load.o ranges.o file_load_$(BITS).o elf_$(BITS).o ifdef CONFIG_HAVE_IMA_KEXEC ifdef CONFIG_IMA diff --git a/arch/powerpc/kexec/ranges.c b/arch/powerpc/kexec/ranges.c new file mode 100644 index 000000000000..dc3ce036f416 --- /dev/null +++ b/arch/powerpc/kexec/ranges.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * powerpc code to implement the kexec_file_load syscall + * + * Copyright (C) 2004 Adam Litke (agl@us.ibm.com) + * Copyright (C) 2004 IBM Corp. + * Copyright (C) 2004,2005 Milton D Miller II, IBM Corporation + * Copyright (C) 2005 R Sharada (sharada@in.ibm.com) + * Copyright (C) 2006 Mohan Kumar M (mohan@in.ibm.com) + * Copyright (C) 2020 IBM Corporation + * + * Based on kexec-tools' kexec-ppc64.c, fs2dt.c. + * Heavily modified for the kernel by + * Hari Bathini, IBM Corporation. + */ + +#define pr_fmt(fmt) "kexec ranges: " fmt + +#include +#include +#include +#include +#include +#include + +/** + * get_max_nr_ranges - Get the max no. of ranges crash_mem structure + * could hold, given the size allocated for it. + * @size: Allocation size of crash_mem structure. 
+ * + * Returns the maximum no. of ranges. + */ +static inline unsigned int get_max_nr_ranges(size_t size) +{ + return ((size - sizeof(struct crash_mem)) / + sizeof(struct crash_mem_range)); +} + +/** + * get_mem_rngs_size - Get the allocated size of mem_rngs based on + * max_nr_ranges and chunk size. + * @mem_rngs: Memory ranges. + * + * Returns the maximum size of @mem_rngs. + */ +static inline size_t get_mem_rngs_size(struct crash_mem *mem_rngs) +{ + size_t size; + + if (!mem_rngs) + return 0; + + size = (sizeof(struct crash_mem) + + (mem_rngs->max_nr_ranges * sizeof(struct crash_mem_range))); + + /* + * Memory is allocated in size multiple of MEM_RANGE_CHUNK_SZ. + * So, align to get the actual length. + */ + return ALIGN(size, MEM_RANGE_CHUNK_SZ); +} + +/** + * __add_mem_range - add a memory range to memory ranges list. + * @mem_ranges: Range list to add the memory range to. + * @base: Base address of the range to add. + * @size: Size of the memory range to add. + * + * (Re)allocates memory, if needed. + * + * Returns 0 on success, negative errno on error. + */ +static int __add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size) +{ + struct crash_mem *mem_rngs = *mem_ranges; + + if (!mem_rngs || (mem_rngs->nr_ranges == mem_rngs->max_nr_ranges)) { + mem_rngs = realloc_mem_ranges(mem_ranges); + if (!mem_rngs) + return -ENOMEM; + } + + mem_rngs->ranges[mem_rngs->nr_ranges].start = base; + mem_rngs->ranges[mem_rngs->nr_ranges].end = base + size - 1; + pr_debug("Added memory range [%#016llx - %#016llx] at index %d\n", + base, base + size - 1, mem_rngs->nr_ranges); + mem_rngs->nr_ranges++; + return 0; +} + +/** + * __merge_memory_ranges - Merges the given memory ranges list. + * @mem_rngs: Range list to merge. + * + * Assumes a sorted range list. + * + * Returns nothing. + */ +static void __merge_memory_ranges(struct crash_mem *mem_rngs) +{ + struct crash_mem_range *ranges; + int i, idx; + + if (!mem_rngs) + return; + + idx = 0; + ranges = &(mem_rngs->ranges[0]); + for (i = 1; i < mem_rngs->nr_ranges; i++) { + if (ranges[i].start <= (ranges[i-1].end + 1)) + ranges[idx].end = ranges[i].end; + else { + idx++; + if (i == idx) + continue; + + ranges[idx] = ranges[i]; + } + } + mem_rngs->nr_ranges = idx + 1; +} + +/* cmp_func_t callback to sort ranges with sort() */ +static int rngcmp(const void *_x, const void *_y) +{ + const struct crash_mem_range *x = _x, *y = _y; + + if (x->start > y->start) + return 1; + if (x->start < y->start) + return -1; + return 0; +} + +/** + * sort_memory_ranges - Sorts the given memory ranges list. + * @mem_rngs: Range list to sort. + * @merge: If true, merge the list after sorting. + * + * Returns nothing. + */ +void sort_memory_ranges(struct crash_mem *mem_rngs, bool merge) +{ + int i; + + if (!mem_rngs) + return; + + /* Sort the ranges in-place */ + sort(&(mem_rngs->ranges[0]), mem_rngs->nr_ranges, + sizeof(mem_rngs->ranges[0]), rngcmp, NULL); + + if (merge) + __merge_memory_ranges(mem_rngs); + + /* For debugging purpose */ + pr_debug("Memory ranges:\n"); + for (i = 0; i < mem_rngs->nr_ranges; i++) { + pr_debug("\t[%03d][%#016llx - %#016llx]\n", i, + mem_rngs->ranges[i].start, + mem_rngs->ranges[i].end); + } +} + +/** + * realloc_mem_ranges - reallocate mem_ranges with size incremented + * by MEM_RANGE_CHUNK_SZ. Frees up the old memory, + * if memory allocation fails. + * @mem_ranges: Memory ranges to reallocate. + * + * Returns pointer to reallocated memory on success, NULL otherwise. 
+ */ +struct crash_mem *realloc_mem_ranges(struct crash_mem **mem_ranges) +{ + struct crash_mem *mem_rngs = *mem_ranges; + unsigned int nr_ranges; + size_t size; + + size = get_mem_rngs_size(mem_rngs); + nr_ranges = mem_rngs ? mem_rngs->nr_ranges : 0; + + size += MEM_RANGE_CHUNK_SZ; + mem_rngs = krealloc(*mem_ranges, size, GFP_KERNEL); + if (!mem_rngs) { + kfree(*mem_ranges); + *mem_ranges = NULL; + return NULL; + } + + mem_rngs->nr_ranges = nr_ranges; + mem_rngs->max_nr_ranges = get_max_nr_ranges(size); + *mem_ranges = mem_rngs; + + return mem_rngs; +} + +/** + * add_mem_range - Updates existing memory range, if there is an overlap. + * Else, adds a new memory range. + * @mem_ranges: Range list to add the memory range to. + * @base: Base address of the range to add. + * @size: Size of the memory range to add. + * + * (Re)allocates memory, if needed. + * + * Returns 0 on success, negative errno on error. + */ +int add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size) +{ + struct crash_mem *mem_rngs = *mem_ranges; + u64 mstart, mend, end; + unsigned int i; + + if (!size) + return 0; + + end = base + size - 1; + + if (!mem_rngs || !(mem_rngs->nr_ranges)) + return __add_mem_range(mem_ranges, base, size); + + for (i = 0; i < mem_rngs->nr_ranges; i++) { + mstart = mem_rngs->ranges[i].start; + mend = mem_rngs->ranges[i].end; + if (base < mend && end > mstart) { + if (base < mstart) + mem_rngs->ranges[i].start = base; + if (end > mend) + mem_rngs->ranges[i].end = end; + return 0; + } + } + + return __add_mem_range(mem_ranges, base, size); +} -- cgit v1.2.3 From b8e55a3e5c208862eacded5aad822184f89f85d9 Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Wed, 29 Jul 2020 17:10:16 +0530 Subject: powerpc/kexec_file: Avoid stomping memory used by special regions crashkernel region could have an overlap with special memory regions like OPAL, RTAS, TCE table & such. These regions are referred to as excluded memory ranges. Setup these ranges during image probe in order to avoid them while finding the buffer for different kdump segments. Override arch_kexec_locate_mem_hole() to locate a memory hole taking these ranges into account. 
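For illustration only (this sketch is not part of the patch, and the type and function names in it are simplified stand-ins rather than kernel APIs), the top-down search described above boils down to walking a sorted, merged exclude list from the highest range downwards and trying the gap above each range as a candidate window:

/* Illustrative sketch, not kernel code: ranges are inclusive, sorted by start. */
struct range { unsigned long long start, end; };

static int find_hole_top_down(const struct range *excl, int nr,
			      unsigned long long min, unsigned long long max,
			      unsigned long long size, unsigned long long *hole)
{
	unsigned long long tmax = max, lo;
	int i;

	for (i = nr - 1; i >= 0; i--) {
		if (excl[i].start > tmax)
			continue;			/* exclude range lies entirely above the window */
		if (excl[i].end < tmax) {		/* gap between this range and tmax */
			lo = excl[i].end + 1 > min ? excl[i].end + 1 : min;
			if (tmax - lo + 1 >= size) {
				*hole = tmax - size + 1;	/* place the buffer at the top of the gap */
				return 1;
			}
		}
		if (excl[i].start <= min)
			return 0;			/* exclude range reaches min; nothing usable below */
		tmax = excl[i].start - 1;		/* keep searching below this exclude range */
	}
	/* no exclude ranges left below tmax; try the remaining window [min, tmax] */
	if (tmax >= min && tmax - min + 1 >= size) {
		*hole = tmax - size + 1;
		return 1;
	}
	return 0;
}

The actual locate_mem_hole_top_down_ppc64() in the patch below additionally intersects each candidate window with memblock regions and honours kbuf->buf_align, which this sketch omits.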
Signed-off-by: Hari Bathini Reviewed-by: Thiago Jung Bauermann Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/159602281047.575379.6636807148335160795.stgit@hbathini --- arch/powerpc/include/asm/kexec.h | 7 +- arch/powerpc/include/asm/kexec_ranges.h | 14 ++ arch/powerpc/kexec/elf_64.c | 8 + arch/powerpc/kexec/file_load_64.c | 337 +++++++++++++++++++++++++++++++- arch/powerpc/kexec/ranges.c | 177 +++++++++++++++++ 5 files changed, 539 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index 9b3feef55aaa..639f31e13593 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -100,14 +100,16 @@ void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_co #ifdef CONFIG_KEXEC_FILE extern const struct kexec_file_ops kexec_elf64_ops; -#ifdef CONFIG_IMA_KEXEC #define ARCH_HAS_KIMAGE_ARCH struct kimage_arch { + struct crash_mem *exclude_ranges; + +#ifdef CONFIG_IMA_KEXEC phys_addr_t ima_buffer_addr; size_t ima_buffer_size; -}; #endif +}; int setup_purgatory(struct kimage *image, const void *slave_code, const void *fdt, unsigned long kernel_load_addr, @@ -125,6 +127,7 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, unsigned long initrd_load_addr, unsigned long initrd_len, const char *cmdline); #endif /* CONFIG_PPC64 */ + #endif /* CONFIG_KEXEC_FILE */ #else /* !CONFIG_KEXEC_CORE */ diff --git a/arch/powerpc/include/asm/kexec_ranges.h b/arch/powerpc/include/asm/kexec_ranges.h index 35ae31a7a4de..7a90000f8d15 100644 --- a/arch/powerpc/include/asm/kexec_ranges.h +++ b/arch/powerpc/include/asm/kexec_ranges.h @@ -7,5 +7,19 @@ void sort_memory_ranges(struct crash_mem *mrngs, bool merge); struct crash_mem *realloc_mem_ranges(struct crash_mem **mem_ranges); int add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size); +int add_tce_mem_ranges(struct crash_mem **mem_ranges); +int add_initrd_mem_range(struct crash_mem **mem_ranges); +#ifdef CONFIG_PPC_BOOK3S_64 +int add_htab_mem_range(struct crash_mem **mem_ranges); +#else +static inline int add_htab_mem_range(struct crash_mem **mem_ranges) +{ + return 0; +} +#endif +int add_kernel_mem_range(struct crash_mem **mem_ranges); +int add_rtas_mem_range(struct crash_mem **mem_ranges); +int add_opal_mem_range(struct crash_mem **mem_ranges); +int add_reserved_mem_ranges(struct crash_mem **mem_ranges); #endif /* _ASM_POWERPC_KEXEC_RANGES_H */ diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c index 23ad04ccaf8e..64c15a5a280b 100644 --- a/arch/powerpc/kexec/elf_64.c +++ b/arch/powerpc/kexec/elf_64.c @@ -46,6 +46,14 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, if (ret) goto out; + if (image->type == KEXEC_TYPE_CRASH) { + /* min & max buffer values for kdump case */ + kbuf.buf_min = pbuf.buf_min = crashk_res.start; + kbuf.buf_max = pbuf.buf_max = + ((crashk_res.end < ppc64_rma_size) ? + crashk_res.end : (ppc64_rma_size - 1)); + } + ret = kexec_elf_load(image, &ehdr, &elf_info, &kbuf, &kernel_load_addr); if (ret) goto out; diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c index 3e9ac5f216b0..d09c7724efa8 100644 --- a/arch/powerpc/kexec/file_load_64.c +++ b/arch/powerpc/kexec/file_load_64.c @@ -17,12 +17,262 @@ #include #include #include +#include +#include const struct kexec_file_ops * const kexec_file_loaders[] = { &kexec_elf64_ops, NULL }; +/** + * get_exclude_memory_ranges - Get exclude memory ranges. 
This list includes + * regions like opal/rtas, tce-table, initrd, + * kernel, htab which should be avoided while + * setting up kexec load segments. + * @mem_ranges: Range list to add the memory ranges to. + * + * Returns 0 on success, negative errno on error. + */ +static int get_exclude_memory_ranges(struct crash_mem **mem_ranges) +{ + int ret; + + ret = add_tce_mem_ranges(mem_ranges); + if (ret) + goto out; + + ret = add_initrd_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_htab_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_kernel_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_rtas_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_opal_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_reserved_mem_ranges(mem_ranges); + if (ret) + goto out; + + /* exclude memory ranges should be sorted for easy lookup */ + sort_memory_ranges(*mem_ranges, true); +out: + if (ret) + pr_err("Failed to setup exclude memory ranges\n"); + return ret; +} + +/** + * __locate_mem_hole_top_down - Looks top down for a large enough memory hole + * in the memory regions between buf_min & buf_max + * for the buffer. If found, sets kbuf->mem. + * @kbuf: Buffer contents and memory parameters. + * @buf_min: Minimum address for the buffer. + * @buf_max: Maximum address for the buffer. + * + * Returns 0 on success, negative errno on error. + */ +static int __locate_mem_hole_top_down(struct kexec_buf *kbuf, + u64 buf_min, u64 buf_max) +{ + int ret = -EADDRNOTAVAIL; + phys_addr_t start, end; + u64 i; + + for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, + MEMBLOCK_NONE, &start, &end, NULL) { + /* + * memblock uses [start, end) convention while it is + * [start, end] here. Fix the off-by-one to have the + * same convention. + */ + end -= 1; + + if (start > buf_max) + continue; + + /* Memory hole not found */ + if (end < buf_min) + break; + + /* Adjust memory region based on the given range */ + if (start < buf_min) + start = buf_min; + if (end > buf_max) + end = buf_max; + + start = ALIGN(start, kbuf->buf_align); + if (start < end && (end - start + 1) >= kbuf->memsz) { + /* Suitable memory range found. Set kbuf->mem */ + kbuf->mem = ALIGN_DOWN(end - kbuf->memsz + 1, + kbuf->buf_align); + ret = 0; + break; + } + } + + return ret; +} + +/** + * locate_mem_hole_top_down_ppc64 - Skip special memory regions to find a + * suitable buffer with top down approach. + * @kbuf: Buffer contents and memory parameters. + * @buf_min: Minimum address for the buffer. + * @buf_max: Maximum address for the buffer. + * @emem: Exclude memory ranges. + * + * Returns 0 on success, negative errno on error. + */ +static int locate_mem_hole_top_down_ppc64(struct kexec_buf *kbuf, + u64 buf_min, u64 buf_max, + const struct crash_mem *emem) +{ + int i, ret = 0, err = -EADDRNOTAVAIL; + u64 start, end, tmin, tmax; + + tmax = buf_max; + for (i = (emem->nr_ranges - 1); i >= 0; i--) { + start = emem->ranges[i].start; + end = emem->ranges[i].end; + + if (start > tmax) + continue; + + if (end < tmax) { + tmin = (end < buf_min ? buf_min : end + 1); + ret = __locate_mem_hole_top_down(kbuf, tmin, tmax); + if (!ret) + return 0; + } + + tmax = start - 1; + + if (tmax < buf_min) { + ret = err; + break; + } + ret = 0; + } + + if (!ret) { + tmin = buf_min; + ret = __locate_mem_hole_top_down(kbuf, tmin, tmax); + } + return ret; +} + +/** + * __locate_mem_hole_bottom_up - Looks bottom up for a large enough memory hole + * in the memory regions between buf_min & buf_max + * for the buffer. 
If found, sets kbuf->mem. + * @kbuf: Buffer contents and memory parameters. + * @buf_min: Minimum address for the buffer. + * @buf_max: Maximum address for the buffer. + * + * Returns 0 on success, negative errno on error. + */ +static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf, + u64 buf_min, u64 buf_max) +{ + int ret = -EADDRNOTAVAIL; + phys_addr_t start, end; + u64 i; + + for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, + MEMBLOCK_NONE, &start, &end, NULL) { + /* + * memblock uses [start, end) convention while it is + * [start, end] here. Fix the off-by-one to have the + * same convention. + */ + end -= 1; + + if (end < buf_min) + continue; + + /* Memory hole not found */ + if (start > buf_max) + break; + + /* Adjust memory region based on the given range */ + if (start < buf_min) + start = buf_min; + if (end > buf_max) + end = buf_max; + + start = ALIGN(start, kbuf->buf_align); + if (start < end && (end - start + 1) >= kbuf->memsz) { + /* Suitable memory range found. Set kbuf->mem */ + kbuf->mem = start; + ret = 0; + break; + } + } + + return ret; +} + +/** + * locate_mem_hole_bottom_up_ppc64 - Skip special memory regions to find a + * suitable buffer with bottom up approach. + * @kbuf: Buffer contents and memory parameters. + * @buf_min: Minimum address for the buffer. + * @buf_max: Maximum address for the buffer. + * @emem: Exclude memory ranges. + * + * Returns 0 on success, negative errno on error. + */ +static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf, + u64 buf_min, u64 buf_max, + const struct crash_mem *emem) +{ + int i, ret = 0, err = -EADDRNOTAVAIL; + u64 start, end, tmin, tmax; + + tmin = buf_min; + for (i = 0; i < emem->nr_ranges; i++) { + start = emem->ranges[i].start; + end = emem->ranges[i].end; + + if (end < tmin) + continue; + + if (start > tmin) { + tmax = (start > buf_max ? buf_max : start - 1); + ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax); + if (!ret) + return 0; + } + + tmin = end + 1; + + if (tmin > buf_max) { + ret = err; + break; + } + ret = 0; + } + + if (!ret) { + tmax = buf_max; + ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax); + } + return ret; +} + /** * setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global * variables and call setup_purgatory() to initialize @@ -67,6 +317,67 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, return setup_new_fdt(image, fdt, initrd_load_addr, initrd_len, cmdline); } +/** + * arch_kexec_locate_mem_hole - Skip special memory regions like rtas, opal, + * tce-table, reserved-ranges & such (exclude + * memory ranges) as they can't be used for kexec + * segment buffer. Sets kbuf->mem when a suitable + * memory hole is found. + * @kbuf: Buffer contents and memory parameters. + * + * Assumes minimum of PAGE_SIZE alignment for kbuf->memsz & kbuf->buf_align. + * + * Returns 0 on success, negative errno on error. + */ +int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf) +{ + struct crash_mem **emem; + u64 buf_min, buf_max; + int ret; + + /* + * Use the generic kexec_locate_mem_hole for regular + * kexec_file_load syscall + */ + if (kbuf->image->type != KEXEC_TYPE_CRASH) + return kexec_locate_mem_hole(kbuf); + + /* Look up the exclude ranges list while locating the memory hole */ + emem = &(kbuf->image->arch.exclude_ranges); + if (!(*emem) || ((*emem)->nr_ranges == 0)) { + pr_warn("No exclude range list. 
Using the default locate mem hole method\n"); + return kexec_locate_mem_hole(kbuf); + } + + /* Segments for kdump kernel should be within crashkernel region */ + buf_min = (kbuf->buf_min < crashk_res.start ? + crashk_res.start : kbuf->buf_min); + buf_max = (kbuf->buf_max > crashk_res.end ? + crashk_res.end : kbuf->buf_max); + + if (buf_min > buf_max) { + pr_err("Invalid buffer min and/or max values\n"); + return -EINVAL; + } + + if (kbuf->top_down) + ret = locate_mem_hole_top_down_ppc64(kbuf, buf_min, buf_max, + *emem); + else + ret = locate_mem_hole_bottom_up_ppc64(kbuf, buf_min, buf_max, + *emem); + + /* Add the buffer allocated to the exclude list for the next lookup */ + if (!ret) { + add_mem_range(emem, kbuf->mem, kbuf->memsz); + sort_memory_ranges(*emem, true); + } else { + pr_err("Failed to locate memory buffer of size %lu\n", + kbuf->memsz); + } + return ret; +} + /** * arch_kexec_kernel_image_probe - Does additional handling needed to setup * kexec segments. @@ -79,9 +390,31 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len) { - /* We don't support crash kernels yet. */ - if (image->type == KEXEC_TYPE_CRASH) + if (image->type == KEXEC_TYPE_CRASH) { + int ret; + + /* Get exclude memory ranges needed for setting up kdump segments */ + ret = get_exclude_memory_ranges(&(image->arch.exclude_ranges)); + if (ret) + pr_err("Failed to setup exclude memory ranges for buffer lookup\n"); + /* Return this until all changes for panic kernel are in */ return -EOPNOTSUPP; + } return kexec_image_probe_default(image, buf, buf_len); } + +/** + * arch_kimage_file_post_load_cleanup - Frees up all the allocations done + * while loading the image. + * @image: kexec image being loaded. + * + * Returns 0 on success, negative errno on error. + */ +int arch_kimage_file_post_load_cleanup(struct kimage *image) +{ + kfree(image->arch.exclude_ranges); + image->arch.exclude_ranges = NULL; + + return kexec_image_post_load_cleanup_default(image); +} diff --git a/arch/powerpc/kexec/ranges.c b/arch/powerpc/kexec/ranges.c index dc3ce036f416..6b81c852feab 100644 --- a/arch/powerpc/kexec/ranges.c +++ b/arch/powerpc/kexec/ranges.c @@ -233,3 +233,180 @@ int add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size) return __add_mem_range(mem_ranges, base, size); } + +/** + * add_tce_mem_ranges - Adds tce-table range to the given memory ranges list. + * @mem_ranges: Range list to add the memory range(s) to. + * + * Returns 0 on success, negative errno on error. + */ +int add_tce_mem_ranges(struct crash_mem **mem_ranges) +{ + struct device_node *dn = NULL; + int ret = 0; + + for_each_node_by_type(dn, "pci") { + u64 base; + u32 size; + + ret = of_property_read_u64(dn, "linux,tce-base", &base); + ret |= of_property_read_u32(dn, "linux,tce-size", &size); + if (ret) { + /* + * It is ok to have pci nodes without tce. So, ignore + * property does not exist error. + */ + if (ret == -EINVAL) { + ret = 0; + continue; + } + break; + } + + ret = add_mem_range(mem_ranges, base, size); + if (ret) + break; + } + + of_node_put(dn); + return ret; +} + +/** + * add_initrd_mem_range - Adds initrd range to the given memory ranges list, + * if the initrd was retained. + * @mem_ranges: Range list to add the memory range to. + * + * Returns 0 on success, negative errno on error. 
+ */ +int add_initrd_mem_range(struct crash_mem **mem_ranges) +{ + u64 base, end; + int ret; + + /* This range means something, only if initrd was retained */ + if (!strstr(saved_command_line, "retain_initrd")) + return 0; + + ret = of_property_read_u64(of_chosen, "linux,initrd-start", &base); + ret |= of_property_read_u64(of_chosen, "linux,initrd-end", &end); + if (!ret) + ret = add_mem_range(mem_ranges, base, end - base + 1); + + return ret; +} + +#ifdef CONFIG_PPC_BOOK3S_64 +/** + * add_htab_mem_range - Adds htab range to the given memory ranges list, + * if it exists + * @mem_ranges: Range list to add the memory range to. + * + * Returns 0 on success, negative errno on error. + */ +int add_htab_mem_range(struct crash_mem **mem_ranges) +{ + if (!htab_address) + return 0; + + return add_mem_range(mem_ranges, __pa(htab_address), htab_size_bytes); +} +#endif + +/** + * add_kernel_mem_range - Adds kernel text region to the given + * memory ranges list. + * @mem_ranges: Range list to add the memory range to. + * + * Returns 0 on success, negative errno on error. + */ +int add_kernel_mem_range(struct crash_mem **mem_ranges) +{ + return add_mem_range(mem_ranges, 0, __pa(_end)); +} + +/** + * add_rtas_mem_range - Adds RTAS region to the given memory ranges list. + * @mem_ranges: Range list to add the memory range to. + * + * Returns 0 on success, negative errno on error. + */ +int add_rtas_mem_range(struct crash_mem **mem_ranges) +{ + struct device_node *dn; + u32 base, size; + int ret = 0; + + dn = of_find_node_by_path("/rtas"); + if (!dn) + return 0; + + ret = of_property_read_u32(dn, "linux,rtas-base", &base); + ret |= of_property_read_u32(dn, "rtas-size", &size); + if (!ret) + ret = add_mem_range(mem_ranges, base, size); + + of_node_put(dn); + return ret; +} + +/** + * add_opal_mem_range - Adds OPAL region to the given memory ranges list. + * @mem_ranges: Range list to add the memory range to. + * + * Returns 0 on success, negative errno on error. + */ +int add_opal_mem_range(struct crash_mem **mem_ranges) +{ + struct device_node *dn; + u64 base, size; + int ret; + + dn = of_find_node_by_path("/ibm,opal"); + if (!dn) + return 0; + + ret = of_property_read_u64(dn, "opal-base-address", &base); + ret |= of_property_read_u64(dn, "opal-runtime-size", &size); + if (!ret) + ret = add_mem_range(mem_ranges, base, size); + + of_node_put(dn); + return ret; +} + +/** + * add_reserved_mem_ranges - Adds "/reserved-ranges" regions exported by f/w + * to the given memory ranges list. + * @mem_ranges: Range list to add the memory ranges to. + * + * Returns 0 on success, negative errno on error. 
+ */ +int add_reserved_mem_ranges(struct crash_mem **mem_ranges) +{ + int n_mem_addr_cells, n_mem_size_cells, i, len, cells, ret = 0; + const __be32 *prop; + + prop = of_get_property(of_root, "reserved-ranges", &len); + if (!prop) + return 0; + + n_mem_addr_cells = of_n_addr_cells(of_root); + n_mem_size_cells = of_n_size_cells(of_root); + cells = n_mem_addr_cells + n_mem_size_cells; + + /* Each reserved range is an (address,size) pair */ + for (i = 0; i < (len / (sizeof(u32) * cells)); i++) { + u64 base, size; + + base = of_read_number(prop + (i * cells), n_mem_addr_cells); + size = of_read_number(prop + (i * cells) + n_mem_addr_cells, + n_mem_size_cells); + + ret = add_mem_range(mem_ranges, base, size); + if (ret) + break; + } + + return ret; +} -- cgit v1.2.3 From adfefc609e55edc5dce18a68d1526af6d70aaf86 Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Wed, 29 Jul 2020 17:10:32 +0530 Subject: powerpc/drmem: Make LMB walk a bit more flexible Currently, numa & prom are the only users of drmem LMB walk code. Loading kdump with kexec_file also needs to walk the drmem LMBs to setup the usable memory ranges for kdump kernel. But there are couple of issues in using the code as is. One, walk_drmem_lmb() code is built into the .init section currently, while kexec_file needs it later. Two, there is no scope to pass data to the callback function for processing and/or erroring out on certain conditions. Fix that by, moving drmem LMB walk code out of .init section, adding scope to pass data to the callback function and bailing out when an error is encountered in the callback function. Signed-off-by: Hari Bathini Tested-by: Pingfan Liu Reviewed-by: Thiago Jung Bauermann Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/159602282727.575379.3979857013827701828.stgit@hbathini --- arch/powerpc/include/asm/drmem.h | 9 +++-- arch/powerpc/kernel/prom.c | 13 +++--- arch/powerpc/mm/drmem.c | 87 ++++++++++++++++++++++++++-------------- arch/powerpc/mm/numa.c | 13 +++--- 4 files changed, 78 insertions(+), 44 deletions(-) diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h index 414d209f45bb..17ccc6474ab6 100644 --- a/arch/powerpc/include/asm/drmem.h +++ b/arch/powerpc/include/asm/drmem.h @@ -90,13 +90,14 @@ static inline bool drmem_lmb_reserved(struct drmem_lmb *lmb) } u64 drmem_lmb_memory_max(void); -void __init walk_drmem_lmbs(struct device_node *dn, - void (*func)(struct drmem_lmb *, const __be32 **)); +int walk_drmem_lmbs(struct device_node *dn, void *data, + int (*func)(struct drmem_lmb *, const __be32 **, void *)); int drmem_update_dt(void); #ifdef CONFIG_PPC_PSERIES -void __init walk_drmem_lmbs_early(unsigned long node, - void (*func)(struct drmem_lmb *, const __be32 **)); +int __init +walk_drmem_lmbs_early(unsigned long node, void *data, + int (*func)(struct drmem_lmb *, const __be32 **, void *)); #endif static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb) diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 5a05c5e993fd..d8a2fb87ba0c 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -470,8 +470,9 @@ static bool validate_mem_limit(u64 base, u64 *size) * This contains a list of memory blocks along with NUMA affinity * information. 
*/ -static void __init early_init_drmem_lmb(struct drmem_lmb *lmb, - const __be32 **usm) +static int __init early_init_drmem_lmb(struct drmem_lmb *lmb, + const __be32 **usm, + void *data) { u64 base, size; int is_kexec_kdump = 0, rngs; @@ -486,7 +487,7 @@ static void __init early_init_drmem_lmb(struct drmem_lmb *lmb, */ if ((lmb->flags & DRCONF_MEM_RESERVED) || !(lmb->flags & DRCONF_MEM_ASSIGNED)) - return; + return 0; if (*usm) is_kexec_kdump = 1; @@ -501,7 +502,7 @@ static void __init early_init_drmem_lmb(struct drmem_lmb *lmb, */ rngs = dt_mem_next_cell(dt_root_size_cells, usm); if (!rngs) /* there are no (base, size) duple */ - return; + return 0; } do { @@ -526,6 +527,8 @@ static void __init early_init_drmem_lmb(struct drmem_lmb *lmb, if (lmb->flags & DRCONF_MEM_HOTREMOVABLE) memblock_mark_hotplug(base, size); } while (--rngs); + + return 0; } #endif /* CONFIG_PPC_PSERIES */ @@ -536,7 +539,7 @@ static int __init early_init_dt_scan_memory_ppc(unsigned long node, #ifdef CONFIG_PPC_PSERIES if (depth == 1 && strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0) { - walk_drmem_lmbs_early(node, early_init_drmem_lmb); + walk_drmem_lmbs_early(node, NULL, early_init_drmem_lmb); return 0; } #endif diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c index 59327cefbc6a..b2eeea39684c 100644 --- a/arch/powerpc/mm/drmem.c +++ b/arch/powerpc/mm/drmem.c @@ -14,6 +14,8 @@ #include #include +static int n_root_addr_cells, n_root_size_cells; + static struct drmem_lmb_info __drmem_info; struct drmem_lmb_info *drmem_info = &__drmem_info; @@ -189,12 +191,13 @@ int drmem_update_dt(void) return rc; } -static void __init read_drconf_v1_cell(struct drmem_lmb *lmb, +static void read_drconf_v1_cell(struct drmem_lmb *lmb, const __be32 **prop) { const __be32 *p = *prop; - lmb->base_addr = dt_mem_next_cell(dt_root_addr_cells, &p); + lmb->base_addr = of_read_number(p, n_root_addr_cells); + p += n_root_addr_cells; lmb->drc_index = of_read_number(p++, 1); p++; /* skip reserved field */ @@ -205,29 +208,33 @@ static void __init read_drconf_v1_cell(struct drmem_lmb *lmb, *prop = p; } -static void __init __walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm, - void (*func)(struct drmem_lmb *, const __be32 **)) +static int +__walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm, void *data, + int (*func)(struct drmem_lmb *, const __be32 **, void *)) { struct drmem_lmb lmb; u32 i, n_lmbs; + int ret = 0; n_lmbs = of_read_number(prop++, 1); - if (n_lmbs == 0) - return; - for (i = 0; i < n_lmbs; i++) { read_drconf_v1_cell(&lmb, &prop); - func(&lmb, &usm); + ret = func(&lmb, &usm, data); + if (ret) + break; } + + return ret; } -static void __init read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell, +static void read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell, const __be32 **prop) { const __be32 *p = *prop; dr_cell->seq_lmbs = of_read_number(p++, 1); - dr_cell->base_addr = dt_mem_next_cell(dt_root_addr_cells, &p); + dr_cell->base_addr = of_read_number(p, n_root_addr_cells); + p += n_root_addr_cells; dr_cell->drc_index = of_read_number(p++, 1); dr_cell->aa_index = of_read_number(p++, 1); dr_cell->flags = of_read_number(p++, 1); @@ -235,17 +242,16 @@ static void __init read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell, *prop = p; } -static void __init __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm, - void (*func)(struct drmem_lmb *, const __be32 **)) +static int +__walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm, void *data, + int (*func)(struct drmem_lmb *, const __be32 **, 
void *)) { struct of_drconf_cell_v2 dr_cell; struct drmem_lmb lmb; u32 i, j, lmb_sets; + int ret = 0; lmb_sets = of_read_number(prop++, 1); - if (lmb_sets == 0) - return; - for (i = 0; i < lmb_sets; i++) { read_drconf_v2_cell(&dr_cell, &prop); @@ -259,21 +265,29 @@ static void __init __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm, lmb.aa_index = dr_cell.aa_index; lmb.flags = dr_cell.flags; - func(&lmb, &usm); + ret = func(&lmb, &usm, data); + if (ret) + break; } } + + return ret; } #ifdef CONFIG_PPC_PSERIES -void __init walk_drmem_lmbs_early(unsigned long node, - void (*func)(struct drmem_lmb *, const __be32 **)) +int __init walk_drmem_lmbs_early(unsigned long node, void *data, + int (*func)(struct drmem_lmb *, const __be32 **, void *)) { const __be32 *prop, *usm; - int len; + int len, ret = -ENODEV; prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len); if (!prop || len < dt_root_size_cells * sizeof(__be32)) - return; + return ret; + + /* Get the address & size cells */ + n_root_addr_cells = dt_root_addr_cells; + n_root_size_cells = dt_root_size_cells; drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop); @@ -281,20 +295,21 @@ void __init walk_drmem_lmbs_early(unsigned long node, prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &len); if (prop) { - __walk_drmem_v1_lmbs(prop, usm, func); + ret = __walk_drmem_v1_lmbs(prop, usm, data, func); } else { prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory-v2", &len); if (prop) - __walk_drmem_v2_lmbs(prop, usm, func); + ret = __walk_drmem_v2_lmbs(prop, usm, data, func); } memblock_dump_all(); + return ret; } #endif -static int __init init_drmem_lmb_size(struct device_node *dn) +static int init_drmem_lmb_size(struct device_node *dn) { const __be32 *prop; int len; @@ -303,12 +318,12 @@ static int __init init_drmem_lmb_size(struct device_node *dn) return 0; prop = of_get_property(dn, "ibm,lmb-size", &len); - if (!prop || len < dt_root_size_cells * sizeof(__be32)) { + if (!prop || len < n_root_size_cells * sizeof(__be32)) { pr_info("Could not determine LMB size\n"); return -1; } - drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop); + drmem_info->lmb_size = of_read_number(prop, n_root_size_cells); return 0; } @@ -329,24 +344,36 @@ static const __be32 *of_get_usable_memory(struct device_node *dn) return prop; } -void __init walk_drmem_lmbs(struct device_node *dn, - void (*func)(struct drmem_lmb *, const __be32 **)) +int walk_drmem_lmbs(struct device_node *dn, void *data, + int (*func)(struct drmem_lmb *, const __be32 **, void *)) { const __be32 *prop, *usm; + int ret = -ENODEV; + + if (!of_root) + return ret; + + /* Get the address & size cells */ + of_node_get(of_root); + n_root_addr_cells = of_n_addr_cells(of_root); + n_root_size_cells = of_n_size_cells(of_root); + of_node_put(of_root); if (init_drmem_lmb_size(dn)) - return; + return ret; usm = of_get_usable_memory(dn); prop = of_get_property(dn, "ibm,dynamic-memory", NULL); if (prop) { - __walk_drmem_v1_lmbs(prop, usm, func); + ret = __walk_drmem_v1_lmbs(prop, usm, data, func); } else { prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL); if (prop) - __walk_drmem_v2_lmbs(prop, usm, func); + ret = __walk_drmem_v2_lmbs(prop, usm, data, func); } + + return ret; } static void __init init_drmem_v1_lmbs(const __be32 *prop) diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 92938393fec6..058fee9a0835 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -645,8 +645,9 @@ static inline int __init read_usm_ranges(const 
__be32 **usm) * Extract NUMA information from the ibm,dynamic-reconfiguration-memory * node. This assumes n_mem_{addr,size}_cells have been set. */ -static void __init numa_setup_drmem_lmb(struct drmem_lmb *lmb, - const __be32 **usm) +static int __init numa_setup_drmem_lmb(struct drmem_lmb *lmb, + const __be32 **usm, + void *data) { unsigned int ranges, is_kexec_kdump = 0; unsigned long base, size, sz; @@ -658,7 +659,7 @@ static void __init numa_setup_drmem_lmb(struct drmem_lmb *lmb, */ if ((lmb->flags & DRCONF_MEM_RESERVED) || !(lmb->flags & DRCONF_MEM_ASSIGNED)) - return; + return 0; if (*usm) is_kexec_kdump = 1; @@ -670,7 +671,7 @@ static void __init numa_setup_drmem_lmb(struct drmem_lmb *lmb, if (is_kexec_kdump) { ranges = read_usm_ranges(usm); if (!ranges) /* there are no (base, size) duple */ - return; + return 0; } do { @@ -687,6 +688,8 @@ static void __init numa_setup_drmem_lmb(struct drmem_lmb *lmb, if (sz) memblock_set_node(base, sz, &memblock.memory, nid); } while (--ranges); + + return 0; } static int __init parse_numa_properties(void) @@ -788,7 +791,7 @@ new_range: */ memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); if (memory) { - walk_drmem_lmbs(memory, numa_setup_drmem_lmb); + walk_drmem_lmbs(memory, NULL, numa_setup_drmem_lmb); of_node_put(memory); } -- cgit v1.2.3 From 7c64e21a1c5a5bcd651d895b8faa68e9cdcc433d Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Wed, 29 Jul 2020 17:12:16 +0530 Subject: powerpc/kexec_file: Restrict memory usage of kdump kernel Kdump kernel, used for capturing the kernel core image, is supposed to use only specific memory regions to avoid corrupting the image to be captured. The regions are crashkernel range - the memory reserved explicitly for kdump kernel, memory used for the tce-table, the OPAL region and RTAS region as applicable. Restrict kdump kernel memory to use only these regions by setting up usable-memory DT property. Also, tell the kdump kernel to run at the loaded address by setting the magic word at 0x5c. Signed-off-by: Hari Bathini Tested-by: Pingfan Liu Reviewed-by: Thiago Jung Bauermann Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/159602284284.575379.6962016255404325493.stgit@hbathini --- arch/powerpc/kexec/file_load_64.c | 386 +++++++++++++++++++++++++++++++++++++- 1 file changed, 385 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c index d09c7724efa8..f94660874765 100644 --- a/arch/powerpc/kexec/file_load_64.c +++ b/arch/powerpc/kexec/file_load_64.c @@ -17,9 +17,23 @@ #include #include #include +#include #include +#include +#include #include +struct umem_info { + u64 *buf; /* data buffer for usable-memory property */ + u32 size; /* size allocated for the data buffer */ + u32 max_entries; /* maximum no. of entries */ + u32 idx; /* index of current entry */ + + /* usable memory ranges to look up */ + unsigned int nr_ranges; + const struct crash_mem_range *ranges; +}; + const struct kexec_file_ops * const kexec_file_loaders[] = { &kexec_elf64_ops, NULL @@ -74,6 +88,44 @@ out: return ret; } +/** + * get_usable_memory_ranges - Get usable memory ranges. This list includes + * regions like crashkernel, opal/rtas & tce-table, + * that kdump kernel could use. + * @mem_ranges: Range list to add the memory ranges to. + * + * Returns 0 on success, negative errno on error. 
+ */ +static int get_usable_memory_ranges(struct crash_mem **mem_ranges) +{ + int ret; + + /* + * Early boot failure observed on guests when low memory (first memory + * block?) is not added to usable memory. So, add [0, crashk_res.end] + * instead of [crashk_res.start, crashk_res.end] to workaround it. + * Also, crashed kernel's memory must be added to reserve map to + * avoid kdump kernel from using it. + */ + ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1); + if (ret) + goto out; + + ret = add_rtas_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_opal_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_tce_mem_ranges(mem_ranges); +out: + if (ret) + pr_err("Failed to setup usable memory ranges\n"); + return ret; +} + /** * __locate_mem_hole_top_down - Looks top down for a large enough memory hole * in the memory regions between buf_min & buf_max @@ -273,6 +325,286 @@ static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf, return ret; } +/** + * check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries + * @um_info: Usable memory buffer and ranges info. + * @cnt: No. of entries to accommodate. + * + * Frees up the old buffer if memory reallocation fails. + * + * Returns buffer on success, NULL on error. + */ +static u64 *check_realloc_usable_mem(struct umem_info *um_info, int cnt) +{ + u32 new_size; + u64 *tbuf; + + if ((um_info->idx + cnt) <= um_info->max_entries) + return um_info->buf; + + new_size = um_info->size + MEM_RANGE_CHUNK_SZ; + tbuf = krealloc(um_info->buf, new_size, GFP_KERNEL); + if (tbuf) { + um_info->buf = tbuf; + um_info->size = new_size; + um_info->max_entries = (um_info->size / sizeof(u64)); + } + + return tbuf; +} + +/** + * add_usable_mem - Add the usable memory ranges within the given memory range + * to the buffer + * @um_info: Usable memory buffer and ranges info. + * @base: Base address of memory range to look for. + * @end: End address of memory range to look for. + * + * Returns 0 on success, negative errno on error. + */ +static int add_usable_mem(struct umem_info *um_info, u64 base, u64 end) +{ + u64 loc_base, loc_end; + bool add; + int i; + + for (i = 0; i < um_info->nr_ranges; i++) { + add = false; + loc_base = um_info->ranges[i].start; + loc_end = um_info->ranges[i].end; + if (loc_base >= base && loc_end <= end) + add = true; + else if (base < loc_end && end > loc_base) { + if (loc_base < base) + loc_base = base; + if (loc_end > end) + loc_end = end; + add = true; + } + + if (add) { + if (!check_realloc_usable_mem(um_info, 2)) + return -ENOMEM; + + um_info->buf[um_info->idx++] = cpu_to_be64(loc_base); + um_info->buf[um_info->idx++] = + cpu_to_be64(loc_end - loc_base + 1); + } + } + + return 0; +} + +/** + * kdump_setup_usable_lmb - This is a callback function that gets called by + * walk_drmem_lmbs for every LMB to set its + * usable memory ranges. + * @lmb: LMB info. + * @usm: linux,drconf-usable-memory property value. + * @data: Pointer to usable memory buffer and ranges info. + * + * Returns 0 on success, negative errno on error. + */ +static int kdump_setup_usable_lmb(struct drmem_lmb *lmb, const __be32 **usm, + void *data) +{ + struct umem_info *um_info; + int tmp_idx, ret; + u64 base, end; + + /* + * kdump load isn't supported on kernels already booted with + * linux,drconf-usable-memory property. 
+ */ + if (*usm) { + pr_err("linux,drconf-usable-memory property already exists!"); + return -EINVAL; + } + + um_info = data; + tmp_idx = um_info->idx; + if (!check_realloc_usable_mem(um_info, 1)) + return -ENOMEM; + + um_info->idx++; + base = lmb->base_addr; + end = base + drmem_lmb_size() - 1; + ret = add_usable_mem(um_info, base, end); + if (!ret) { + /* + * Update the no. of ranges added. Two entries (base & size) + * for every range added. + */ + um_info->buf[tmp_idx] = + cpu_to_be64((um_info->idx - tmp_idx - 1) / 2); + } + + return ret; +} + +#define NODE_PATH_LEN 256 +/** + * add_usable_mem_property - Add usable memory property for the given + * memory node. + * @fdt: Flattened device tree for the kdump kernel. + * @dn: Memory node. + * @um_info: Usable memory buffer and ranges info. + * + * Returns 0 on success, negative errno on error. + */ +static int add_usable_mem_property(void *fdt, struct device_node *dn, + struct umem_info *um_info) +{ + int n_mem_addr_cells, n_mem_size_cells, node; + char path[NODE_PATH_LEN]; + int i, len, ranges, ret; + const __be32 *prop; + u64 base, end; + + of_node_get(dn); + + if (snprintf(path, NODE_PATH_LEN, "%pOF", dn) > (NODE_PATH_LEN - 1)) { + pr_err("Buffer (%d) too small for memory node: %pOF\n", + NODE_PATH_LEN, dn); + return -EOVERFLOW; + } + pr_debug("Memory node path: %s\n", path); + + /* Now that we know the path, find its offset in kdump kernel's fdt */ + node = fdt_path_offset(fdt, path); + if (node < 0) { + pr_err("Malformed device tree: error reading %s\n", path); + ret = -EINVAL; + goto out; + } + + /* Get the address & size cells */ + n_mem_addr_cells = of_n_addr_cells(dn); + n_mem_size_cells = of_n_size_cells(dn); + pr_debug("address cells: %d, size cells: %d\n", n_mem_addr_cells, + n_mem_size_cells); + + um_info->idx = 0; + if (!check_realloc_usable_mem(um_info, 2)) { + ret = -ENOMEM; + goto out; + } + + prop = of_get_property(dn, "reg", &len); + if (!prop || len <= 0) { + ret = 0; + goto out; + } + + /* + * "reg" property represents sequence of (addr,size) tuples + * each representing a memory range. + */ + ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells); + + for (i = 0; i < ranges; i++) { + base = of_read_number(prop, n_mem_addr_cells); + prop += n_mem_addr_cells; + end = base + of_read_number(prop, n_mem_size_cells) - 1; + prop += n_mem_size_cells; + + ret = add_usable_mem(um_info, base, end); + if (ret) + goto out; + } + + /* + * No kdump kernel usable memory found in this memory node. + * Write (0,0) tuple in linux,usable-memory property for + * this region to be ignored. + */ + if (um_info->idx == 0) { + um_info->buf[0] = 0; + um_info->buf[1] = 0; + um_info->idx = 2; + } + + ret = fdt_setprop(fdt, node, "linux,usable-memory", um_info->buf, + (um_info->idx * sizeof(u64))); + +out: + of_node_put(dn); + return ret; +} + + +/** + * update_usable_mem_fdt - Updates kdump kernel's fdt with linux,usable-memory + * and linux,drconf-usable-memory DT properties as + * appropriate to restrict its memory usage. + * @fdt: Flattened device tree for the kdump kernel. + * @usable_mem: Usable memory ranges for kdump kernel. + * + * Returns 0 on success, negative errno on error. 
+ */ +static int update_usable_mem_fdt(void *fdt, struct crash_mem *usable_mem) +{ + struct umem_info um_info; + struct device_node *dn; + int node, ret = 0; + + if (!usable_mem) { + pr_err("Usable memory ranges for kdump kernel not found\n"); + return -ENOENT; + } + + node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory"); + if (node == -FDT_ERR_NOTFOUND) + pr_debug("No dynamic reconfiguration memory found\n"); + else if (node < 0) { + pr_err("Malformed device tree: error reading /ibm,dynamic-reconfiguration-memory.\n"); + return -EINVAL; + } + + um_info.buf = NULL; + um_info.size = 0; + um_info.max_entries = 0; + um_info.idx = 0; + /* Memory ranges to look up */ + um_info.ranges = &(usable_mem->ranges[0]); + um_info.nr_ranges = usable_mem->nr_ranges; + + dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); + if (dn) { + ret = walk_drmem_lmbs(dn, &um_info, kdump_setup_usable_lmb); + of_node_put(dn); + + if (ret) { + pr_err("Could not setup linux,drconf-usable-memory property for kdump\n"); + goto out; + } + + ret = fdt_setprop(fdt, node, "linux,drconf-usable-memory", + um_info.buf, (um_info.idx * sizeof(u64))); + if (ret) { + pr_err("Failed to update fdt with linux,drconf-usable-memory property"); + goto out; + } + } + + /* + * Walk through each memory node and set linux,usable-memory property + * for the corresponding node in kdump kernel's fdt. + */ + for_each_node_by_type(dn, "memory") { + ret = add_usable_mem_property(fdt, dn, &um_info); + if (ret) { + pr_err("Failed to set linux,usable-memory property for %s node", + dn->full_name); + goto out; + } + } + +out: + kfree(um_info.buf); + return ret; +} + /** * setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global * variables and call setup_purgatory() to initialize @@ -293,6 +625,25 @@ int setup_purgatory_ppc64(struct kimage *image, const void *slave_code, ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr, fdt_load_addr); + if (ret) + goto out; + + if (image->type == KEXEC_TYPE_CRASH) { + u32 my_run_at_load = 1; + + /* + * Tell relocatable kernel to run at load address + * via the word meant for that at 0x5c. + */ + ret = kexec_purgatory_get_set_symbol(image, "run_at_load", + &my_run_at_load, + sizeof(my_run_at_load), + false); + if (ret) + goto out; + } + +out: if (ret) pr_err("Failed to setup purgatory symbols"); return ret; @@ -314,7 +665,40 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, unsigned long initrd_load_addr, unsigned long initrd_len, const char *cmdline) { - return setup_new_fdt(image, fdt, initrd_load_addr, initrd_len, cmdline); + struct crash_mem *umem = NULL; + int ret; + + ret = setup_new_fdt(image, fdt, initrd_load_addr, initrd_len, cmdline); + if (ret) + goto out; + + /* + * Restrict memory usage for kdump kernel by setting up + * usable memory ranges. 
+ */ + if (image->type == KEXEC_TYPE_CRASH) { + ret = get_usable_memory_ranges(&umem); + if (ret) + goto out; + + ret = update_usable_mem_fdt(fdt, umem); + if (ret) { + pr_err("Error setting up usable-memory property for kdump kernel\n"); + goto out; + } + + /* Ensure we don't touch crashed kernel's memory */ + ret = fdt_add_mem_rsv(fdt, 0, crashk_res.start); + if (ret) { + pr_err("Error reserving crash memory: %s\n", + fdt_strerror(ret)); + goto out; + } + } + +out: + kfree(umem); + return ret; } /** -- cgit v1.2.3 From 1a1cf93c200581c72a3cd521e1e0a1a3b5d0077d Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Wed, 29 Jul 2020 17:12:58 +0530 Subject: powerpc/kexec_file: Setup backup region for kdump kernel Though kdump kernel boots from loaded address, the first 64KB of it is copied down to real 0. So, setup a backup region and let purgatory copy the first 64KB of crashed kernel into this backup region before booting into kdump kernel. Update reserve map with backup region and crashed kernel's memory to avoid kdump kernel from accidentially using that memory. Signed-off-by: Hari Bathini Reviewed-by: Thiago Jung Bauermann Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/159602294718.575379.16216507537038008623.stgit@hbathini --- arch/powerpc/include/asm/crashdump-ppc64.h | 19 ++++++ arch/powerpc/include/asm/kexec.h | 7 +++ arch/powerpc/kexec/elf_64.c | 9 +++ arch/powerpc/kexec/file_load_64.c | 93 +++++++++++++++++++++++++++++- arch/powerpc/purgatory/trampoline_64.S | 38 ++++++++++-- 5 files changed, 159 insertions(+), 7 deletions(-) create mode 100644 arch/powerpc/include/asm/crashdump-ppc64.h diff --git a/arch/powerpc/include/asm/crashdump-ppc64.h b/arch/powerpc/include/asm/crashdump-ppc64.h new file mode 100644 index 000000000000..68d9717cc5ee --- /dev/null +++ b/arch/powerpc/include/asm/crashdump-ppc64.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ASM_POWERPC_CRASHDUMP_PPC64_H +#define _ASM_POWERPC_CRASHDUMP_PPC64_H + +/* + * Backup region - first 64KB of System RAM + * + * If ever the below macros are to be changed, please be judicious. + * The implicit assumptions are: + * - start, end & size are less than UINT32_MAX. + * - start & size are at least 8 byte aligned. 
+ * + * For implementation details: arch/powerpc/purgatory/trampoline_64.S + */ +#define BACKUP_SRC_START 0 +#define BACKUP_SRC_END 0xffff +#define BACKUP_SRC_SIZE (BACKUP_SRC_END - BACKUP_SRC_START + 1) + +#endif /* __ASM_POWERPC_CRASHDUMP_PPC64_H */ diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index 639f31e13593..807bb58ed8fa 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -105,6 +105,9 @@ extern const struct kexec_file_ops kexec_elf64_ops; struct kimage_arch { struct crash_mem *exclude_ranges; + unsigned long backup_start; + void *backup_buf; + #ifdef CONFIG_IMA_KEXEC phys_addr_t ima_buffer_addr; size_t ima_buffer_size; @@ -120,6 +123,10 @@ int setup_new_fdt(const struct kimage *image, void *fdt, int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size); #ifdef CONFIG_PPC64 +struct kexec_buf; + +int load_crashdump_segments_ppc64(struct kimage *image, + struct kexec_buf *kbuf); int setup_purgatory_ppc64(struct kimage *image, const void *slave_code, const void *fdt, unsigned long kernel_load_addr, unsigned long fdt_load_addr); diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c index 64c15a5a280b..76e2fc7e6dc3 100644 --- a/arch/powerpc/kexec/elf_64.c +++ b/arch/powerpc/kexec/elf_64.c @@ -68,6 +68,15 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem); + /* Load additional segments needed for panic kernel */ + if (image->type == KEXEC_TYPE_CRASH) { + ret = load_crashdump_segments_ppc64(image, &kbuf); + if (ret) { + pr_err("Failed to load kdump kernel segments\n"); + goto out; + } + } + if (initrd != NULL) { kbuf.buffer = initrd; kbuf.bufsz = kbuf.memsz = initrd_len; diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c index f94660874765..a81bffb72cc5 100644 --- a/arch/powerpc/kexec/file_load_64.c +++ b/arch/powerpc/kexec/file_load_64.c @@ -20,8 +20,10 @@ #include #include #include +#include #include #include +#include struct umem_info { u64 *buf; /* data buffer for usable-memory property */ @@ -605,6 +607,70 @@ out: return ret; } +/** + * load_backup_segment - Locate a memory hole to place the backup region. + * @image: Kexec image. + * @kbuf: Buffer contents and memory parameters. + * + * Returns 0 on success, negative errno on error. + */ +static int load_backup_segment(struct kimage *image, struct kexec_buf *kbuf) +{ + void *buf; + int ret; + + /* + * Setup a source buffer for backup segment. + * + * A source buffer has no meaning for backup region as data will + * be copied from backup source, after crash, in the purgatory. + * But as load segment code doesn't recognize such segments, + * setup a dummy source buffer to keep it happy for now. + */ + buf = vzalloc(BACKUP_SRC_SIZE); + if (!buf) + return -ENOMEM; + + kbuf->buffer = buf; + kbuf->mem = KEXEC_BUF_MEM_UNKNOWN; + kbuf->bufsz = kbuf->memsz = BACKUP_SRC_SIZE; + kbuf->top_down = false; + + ret = kexec_add_buffer(kbuf); + if (ret) { + vfree(buf); + return ret; + } + + image->arch.backup_buf = buf; + image->arch.backup_start = kbuf->mem; + return 0; +} + +/** + * load_crashdump_segments_ppc64 - Initialize the additional segements needed + * to load kdump kernel. + * @image: Kexec image. + * @kbuf: Buffer contents and memory parameters. + * + * Returns 0 on success, negative errno on error. 
+ */ +int load_crashdump_segments_ppc64(struct kimage *image, + struct kexec_buf *kbuf) +{ + int ret; + + /* Load backup segment - first 64K bytes of the crashing kernel */ + ret = load_backup_segment(image, kbuf); + if (ret) { + pr_err("Failed to load backup segment\n"); + return ret; + } + pr_debug("Loaded the backup region at 0x%lx\n", kbuf->mem); + + return 0; +} + /** * setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global * variables and call setup_purgatory() to initialize @@ -643,6 +709,11 @@ int setup_purgatory_ppc64(struct kimage *image, const void *slave_code, goto out; } + /* Tell purgatory where to look for backup region */ + ret = kexec_purgatory_get_set_symbol(image, "backup_start", + &image->arch.backup_start, + sizeof(image->arch.backup_start), + false); out: if (ret) pr_err("Failed to setup purgatory symbols"); @@ -674,7 +745,7 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, /* * Restrict memory usage for kdump kernel by setting up - * usable memory ranges. + * usable memory ranges and memory reserve map. */ if (image->type == KEXEC_TYPE_CRASH) { ret = get_usable_memory_ranges(&umem); @@ -687,13 +758,26 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, goto out; } - /* Ensure we don't touch crashed kernel's memory */ - ret = fdt_add_mem_rsv(fdt, 0, crashk_res.start); + /* + * Ensure we don't touch crashed kernel's memory except the + * first 64K of RAM, which will be backed up. + */ + ret = fdt_add_mem_rsv(fdt, BACKUP_SRC_END + 1, + crashk_res.start - BACKUP_SRC_SIZE); if (ret) { pr_err("Error reserving crash memory: %s\n", fdt_strerror(ret)); goto out; } + + /* Ensure backup region is not used by kdump/capture kernel */ + ret = fdt_add_mem_rsv(fdt, image->arch.backup_start, + BACKUP_SRC_SIZE); + if (ret) { + pr_err("Error reserving memory for backup: %s\n", + fdt_strerror(ret)); + goto out; + } } out: @@ -800,5 +884,8 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image) kfree(image->arch.exclude_ranges); image->arch.exclude_ranges = NULL; + vfree(image->arch.backup_buf); + image->arch.backup_buf = NULL; + return kexec_image_post_load_cleanup_default(image); } diff --git a/arch/powerpc/purgatory/trampoline_64.S b/arch/powerpc/purgatory/trampoline_64.S index a5a83c3f53e6..b567da56dde1 100644 --- a/arch/powerpc/purgatory/trampoline_64.S +++ b/arch/powerpc/purgatory/trampoline_64.S @@ -10,6 +10,7 @@ */ #include +#include .machine ppc64 .balign 256 @@ -43,14 +44,39 @@ master: mr %r17,%r3 /* save cpu id to r17 */ mr %r15,%r4 /* save physical address in reg15 */ + /* Work out where we're running */ + bcl 20, 31, 0f +0: mflr %r18 + + /* + * Copy BACKUP_SRC_SIZE bytes from BACKUP_SRC_START to + * backup_start 8 bytes at a time. + * + * Use r3 = dest, r4 = src, r5 = size, r6 = count + */ + ld %r3, (backup_start - 0b)(%r18) + cmpdi %cr0, %r3, 0 + beq .Lskip_copy /* skip if there is no backup region */ + lis %r5, BACKUP_SRC_SIZE@h + ori %r5, %r5, BACKUP_SRC_SIZE@l + cmpdi %cr0, %r5, 0 + beq .Lskip_copy /* skip if copy size is zero */ + lis %r4, BACKUP_SRC_START@h + ori %r4, %r4, BACKUP_SRC_START@l + li %r6, 0 +.Lcopy_loop: + ldx %r0, %r6, %r4 + stdx %r0, %r6, %r3 + addi %r6, %r6, 8 + cmpld %cr0, %r6, %r5 + blt .Lcopy_loop + +.Lskip_copy: or %r3,%r3,%r3 /* ok now to high priority, lets boot */ lis %r6,0x1 mtctr %r6 /* delay a bit for slaves to catch up */ bdnz . 
/* before we overwrite 0-100 again */ - bl 0f /* Work out where we're running */ -0: mflr %r18 - /* load device-tree address */ ld %r3, (dt_offset - 0b)(%r18) mr %r16,%r3 /* save dt address in reg16 */ @@ -89,7 +115,6 @@ master: rfid /* update MSR and start kernel */ - .balign 8 .globl kernel kernel: @@ -102,6 +127,11 @@ dt_offset: .8byte 0x0 .size dt_offset, . - dt_offset + .balign 8 + .globl backup_start +backup_start: + .8byte 0x0 + .size backup_start, . - backup_start .data .balign 8 -- cgit v1.2.3 From cb350c1f1f867db16725f1bb06be033ece19e998 Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Wed, 29 Jul 2020 17:13:14 +0530 Subject: powerpc/kexec_file: Prepare elfcore header for crashing kernel Prepare elf headers for the crashing kernel's core file using crash_prepare_elf64_headers() and pass on this info to kdump kernel by updating its command line with elfcorehdr parameter. Also, add elfcorehdr location to reserve map to avoid it from being stomped on while booting. Signed-off-by: Hari Bathini Tested-by: Pingfan Liu Reviewed-by: Thiago Jung Bauermann [mpe: Ensure cmdline is nul terminated] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/159602298855.575379.15819225623219909517.stgit@hbathini --- arch/powerpc/include/asm/kexec.h | 6 ++ arch/powerpc/kexec/elf_64.c | 12 +++ arch/powerpc/kexec/file_load.c | 51 ++++++++++++ arch/powerpc/kexec/file_load_64.c | 165 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 234 insertions(+) diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index 807bb58ed8fa..55d6ede30c19 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -108,12 +108,18 @@ struct kimage_arch { unsigned long backup_start; void *backup_buf; + unsigned long elfcorehdr_addr; + unsigned long elf_headers_sz; + void *elf_headers; + #ifdef CONFIG_IMA_KEXEC phys_addr_t ima_buffer_addr; size_t ima_buffer_size; #endif }; +char *setup_kdump_cmdline(struct kimage *image, char *cmdline, + unsigned long cmdline_len); int setup_purgatory(struct kimage *image, const void *slave_code, const void *fdt, unsigned long kernel_load_addr, unsigned long fdt_load_addr); diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c index 76e2fc7e6dc3..d0e459bb2f05 100644 --- a/arch/powerpc/kexec/elf_64.c +++ b/arch/powerpc/kexec/elf_64.c @@ -35,6 +35,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, void *fdt; const void *slave_code; struct elfhdr ehdr; + char *modified_cmdline = NULL; struct kexec_elf_info elf_info; struct kexec_buf kbuf = { .image = image, .buf_min = 0, .buf_max = ppc64_rma_size }; @@ -75,6 +76,16 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, pr_err("Failed to load kdump kernel segments\n"); goto out; } + + /* Setup cmdline for kdump kernel case */ + modified_cmdline = setup_kdump_cmdline(image, cmdline, + cmdline_len); + if (!modified_cmdline) { + pr_err("Setting up cmdline for kdump kernel failed\n"); + ret = -EINVAL; + goto out; + } + cmdline = modified_cmdline; } if (initrd != NULL) { @@ -131,6 +142,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, pr_err("Error setting up the purgatory.\n"); out: + kfree(modified_cmdline); kexec_free_elf_info(&elf_info); /* Make kimage_file_post_load_cleanup free the fdt buffer for us. 
*/ diff --git a/arch/powerpc/kexec/file_load.c b/arch/powerpc/kexec/file_load.c index 38439aba27d7..9a232bc36c8f 100644 --- a/arch/powerpc/kexec/file_load.c +++ b/arch/powerpc/kexec/file_load.c @@ -18,10 +18,47 @@ #include #include #include +#include #include #define SLAVE_CODE_SIZE 256 /* First 0x100 bytes */ +/** + * setup_kdump_cmdline - Prepend "elfcorehdr= " to command line + * of kdump kernel for exporting the core. + * @image: Kexec image + * @cmdline: Command line parameters to update. + * @cmdline_len: Length of the cmdline parameters. + * + * kdump segment must be setup before calling this function. + * + * Returns new cmdline buffer for kdump kernel on success, NULL otherwise. + */ +char *setup_kdump_cmdline(struct kimage *image, char *cmdline, + unsigned long cmdline_len) +{ + int elfcorehdr_strlen; + char *cmdline_ptr; + + cmdline_ptr = kzalloc(COMMAND_LINE_SIZE, GFP_KERNEL); + if (!cmdline_ptr) + return NULL; + + elfcorehdr_strlen = sprintf(cmdline_ptr, "elfcorehdr=0x%lx ", + image->arch.elfcorehdr_addr); + + if (elfcorehdr_strlen + cmdline_len > COMMAND_LINE_SIZE) { + pr_err("Appending elfcorehdr= exceeds cmdline size\n"); + kfree(cmdline_ptr); + return NULL; + } + + memcpy(cmdline_ptr + elfcorehdr_strlen, cmdline, cmdline_len); + // Ensure it's nul terminated + cmdline_ptr[COMMAND_LINE_SIZE - 1] = '\0'; + return cmdline_ptr; +} + /** * setup_purgatory - initialize the purgatory's global variables * @image: kexec image. @@ -221,6 +258,20 @@ int setup_new_fdt(const struct kimage *image, void *fdt, } } + if (image->type == KEXEC_TYPE_CRASH) { + /* + * Avoid elfcorehdr from being stomped on in kdump kernel by + * setting up memory reserve map. + */ + ret = fdt_add_mem_rsv(fdt, image->arch.elfcorehdr_addr, + image->arch.elf_headers_sz); + if (ret) { + pr_err("Error reserving elfcorehdr memory: %s\n", + fdt_strerror(ret)); + goto err; + } + } + ret = setup_ima_buffer(image, fdt, chosen_node); if (ret) { pr_err("Error setting up the new device tree.\n"); diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c index a81bffb72cc5..0d280d097cd6 100644 --- a/arch/powerpc/kexec/file_load_64.c +++ b/arch/powerpc/kexec/file_load_64.c @@ -128,6 +128,83 @@ out: return ret; } +/** + * get_crash_memory_ranges - Get crash memory ranges. This list includes + * first/crashing kernel's memory regions that + * would be exported via an elfcore. + * @mem_ranges: Range list to add the memory ranges to. + * + * Returns 0 on success, negative errno on error. 
+ */ +static int get_crash_memory_ranges(struct crash_mem **mem_ranges) +{ + struct memblock_region *reg; + struct crash_mem *tmem; + int ret; + + for_each_memblock(memory, reg) { + u64 base, size; + + base = (u64)reg->base; + size = (u64)reg->size; + + /* Skip backup memory region, which needs a separate entry */ + if (base == BACKUP_SRC_START) { + if (size > BACKUP_SRC_SIZE) { + base = BACKUP_SRC_END + 1; + size -= BACKUP_SRC_SIZE; + } else + continue; + } + + ret = add_mem_range(mem_ranges, base, size); + if (ret) + goto out; + + /* Try merging adjacent ranges before reallocation attempt */ + if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges) + sort_memory_ranges(*mem_ranges, true); + } + + /* Reallocate memory ranges if there is no space to split ranges */ + tmem = *mem_ranges; + if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) { + tmem = realloc_mem_ranges(mem_ranges); + if (!tmem) + goto out; + } + + /* Exclude crashkernel region */ + ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end); + if (ret) + goto out; + + /* + * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL + * regions are exported to save their context at the time of + * crash, they should actually be backed up just like the + * first 64K bytes of memory. + */ + ret = add_rtas_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_opal_mem_range(mem_ranges); + if (ret) + goto out; + + /* create a separate program header for the backup region */ + ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE); + if (ret) + goto out; + + sort_memory_ranges(*mem_ranges, false); +out: + if (ret) + pr_err("Failed to setup crash memory ranges\n"); + return ret; +} + /** * __locate_mem_hole_top_down - Looks top down for a large enough memory hole * in the memory regions between buf_min & buf_max @@ -647,6 +724,81 @@ static int load_backup_segment(struct kimage *image, struct kexec_buf *kbuf) return 0; } +/** + * update_backup_region_phdr - Update backup region's offset for the core to + * export the region appropriately. + * @image: Kexec image. + * @ehdr: ELF core header. + * + * Assumes an exclusive program header is setup for the backup region + * in the ELF headers + * + * Returns nothing. + */ +static void update_backup_region_phdr(struct kimage *image, Elf64_Ehdr *ehdr) +{ + Elf64_Phdr *phdr; + unsigned int i; + + phdr = (Elf64_Phdr *)(ehdr + 1); + for (i = 0; i < ehdr->e_phnum; i++) { + if (phdr->p_paddr == BACKUP_SRC_START) { + phdr->p_offset = image->arch.backup_start; + pr_debug("Backup region offset updated to 0x%lx\n", + image->arch.backup_start); + return; + } + } +} + +/** + * load_elfcorehdr_segment - Setup crash memory ranges and initialize elfcorehdr + * segment needed to load kdump kernel. + * @image: Kexec image. + * @kbuf: Buffer contents and memory parameters. + * + * Returns 0 on success, negative errno on error. 
+ */ +static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf) +{ + struct crash_mem *cmem = NULL; + unsigned long headers_sz; + void *headers = NULL; + int ret; + + ret = get_crash_memory_ranges(&cmem); + if (ret) + goto out; + + /* Setup elfcorehdr segment */ + ret = crash_prepare_elf64_headers(cmem, false, &headers, &headers_sz); + if (ret) { + pr_err("Failed to prepare elf headers for the core\n"); + goto out; + } + + /* Fix the offset for backup region in the ELF header */ + update_backup_region_phdr(image, headers); + + kbuf->buffer = headers; + kbuf->mem = KEXEC_BUF_MEM_UNKNOWN; + kbuf->bufsz = kbuf->memsz = headers_sz; + kbuf->top_down = false; + + ret = kexec_add_buffer(kbuf); + if (ret) { + vfree(headers); + goto out; + } + + image->arch.elfcorehdr_addr = kbuf->mem; + image->arch.elf_headers_sz = headers_sz; + image->arch.elf_headers = headers; +out: + kfree(cmem); + return ret; +} + /** * load_crashdump_segments_ppc64 - Initialize the additional segements needed * to load kdump kernel. @@ -668,6 +820,15 @@ int load_crashdump_segments_ppc64(struct kimage *image, } pr_debug("Loaded the backup region at 0x%lx\n", kbuf->mem); + /* Load elfcorehdr segment - to export crashing kernel's vmcore */ + ret = load_elfcorehdr_segment(image, kbuf); + if (ret) { + pr_err("Failed to load elfcorehdr segment\n"); + return ret; + } + pr_debug("Loaded elf core header at 0x%lx, bufsz=0x%lx memsz=0x%lx\n", + image->arch.elfcorehdr_addr, kbuf->bufsz, kbuf->memsz); + return 0; } @@ -887,5 +1048,9 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image) vfree(image->arch.backup_buf); image->arch.backup_buf = NULL; + vfree(image->arch.elf_headers); + image->arch.elf_headers = NULL; + image->arch.elf_headers_sz = 0; + return kexec_image_post_load_cleanup_default(image); } -- cgit v1.2.3 From 6ecd0163d36049b5f2435a8658f1320c9f3f2924 Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Wed, 29 Jul 2020 17:13:33 +0530 Subject: powerpc/kexec_file: Add appropriate regions for memory reserve map While initrd, elfcorehdr and backup regions are already added to the reserve map, there are a few missing regions that need to be added to the memory reserve map. Add them here. And now that all the changes to load panic kernel are in place, claim likewise. Signed-off-by: Hari Bathini Tested-by: Pingfan Liu Reviewed-by: Thiago Jung Bauermann Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/159602300473.575379.4218568032039284448.stgit@hbathini --- arch/powerpc/kexec/file_load_64.c | 58 +++++++++++++++++++++++++++++++++++---- 1 file changed, 53 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c index 0d280d097cd6..f13c5b8399e1 100644 --- a/arch/powerpc/kexec/file_load_64.c +++ b/arch/powerpc/kexec/file_load_64.c @@ -205,6 +205,34 @@ out: return ret; } +/** + * get_reserved_memory_ranges - Get reserve memory ranges. This list includes + * memory regions that should be added to the + * memory reserve map to ensure the region is + * protected from any mischief. + * @mem_ranges: Range list to add the memory ranges to. + * + * Returns 0 on success, negative errno on error. 
+ */ +static int get_reserved_memory_ranges(struct crash_mem **mem_ranges) +{ + int ret; + + ret = add_rtas_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_tce_mem_ranges(mem_ranges); + if (ret) + goto out; + + ret = add_reserved_mem_ranges(mem_ranges); +out: + if (ret) + pr_err("Failed to setup reserved memory ranges\n"); + return ret; +} + /** * __locate_mem_hole_top_down - Looks top down for a large enough memory hole * in the memory regions between buf_min & buf_max @@ -897,8 +925,8 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, unsigned long initrd_load_addr, unsigned long initrd_len, const char *cmdline) { - struct crash_mem *umem = NULL; - int ret; + struct crash_mem *umem = NULL, *rmem = NULL; + int i, nr_ranges, ret; ret = setup_new_fdt(image, fdt, initrd_load_addr, initrd_len, cmdline); if (ret) @@ -941,7 +969,27 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, } } + /* Update memory reserve map */ + ret = get_reserved_memory_ranges(&rmem); + if (ret) + goto out; + + nr_ranges = rmem ? rmem->nr_ranges : 0; + for (i = 0; i < nr_ranges; i++) { + u64 base, size; + + base = rmem->ranges[i].start; + size = rmem->ranges[i].end - base + 1; + ret = fdt_add_mem_rsv(fdt, base, size); + if (ret) { + pr_err("Error updating memory reserve map: %s\n", + fdt_strerror(ret)); + goto out; + } + } + out: + kfree(rmem); kfree(umem); return ret; } @@ -1024,10 +1072,10 @@ int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, /* Get exclude memory ranges needed for setting up kdump segments */ ret = get_exclude_memory_ranges(&(image->arch.exclude_ranges)); - if (ret) + if (ret) { pr_err("Failed to setup exclude memory ranges for buffer lookup\n"); - /* Return this until all changes for panic kernel are in */ - return -EOPNOTSUPP; + return ret; + } } return kexec_image_probe_default(image, buf, buf_len); -- cgit v1.2.3 From b5667d13be8d0928a02b46e0c6f7ab891d32f697 Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Wed, 29 Jul 2020 17:13:49 +0530 Subject: powerpc/kexec_file: Fix kexec load failure with lack of memory hole The kexec purgatory has to run in real mode. Only the first memory block maybe accessible in real mode. And, unlike the case with panic kernel, no memory is set aside for regular kexec load. Another thing to note is, the memory for crashkernel is reserved at an offset of 128MB. So, when crashkernel memory is reserved, the memory ranges to load kexec segments shrink further as the generic code only looks for memblock free memory ranges and in all likelihood only a tiny bit of memory from 0 to 128MB would be available to load kexec segments. With kdump being used by default in general, kexec file load is likely to fail almost always. This can be fixed by changing the memory hole lookup logic for regular kexec to use the same method as kdump. This would mean that most kexec segments will overlap with crashkernel memory region. That should still be ok as the pages, whose destination address isn't available while loading, are placed in an intermediate location till a flush to the actual destination address happens during kexec boot sequence. 
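As a point of reference (not part of this patch), the path being fixed here is the
regular, non-crash kexec_file_load(2) load. A minimal user-space sketch of such a
load is shown below; the kernel and initrd paths and the command line are
placeholders, and error handling is mostly omitted:

  #include <fcntl.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/syscall.h>

  int main(void)
  {
  	/* Placeholder paths: any bootable kernel/initrd on the system */
  	int kernel_fd = open("/boot/vmlinux", O_RDONLY);
  	int initrd_fd = open("/boot/initrd.img", O_RDONLY);
  	const char *cmdline = "root=/dev/sda2 ro";

  	if (kernel_fd < 0 || initrd_fd < 0)
  		return 1;

  	/* Regular (non-panic) load: flags = 0, i.e. no KEXEC_FILE_ON_CRASH */
  	if (syscall(SYS_kexec_file_load, kernel_fd, initrd_fd,
  		    strlen(cmdline) + 1, cmdline, 0UL) != 0)
  		return 1;
  	return 0;
  }

Before this change such a load was likely to fail for want of a memory hole below
the 128MB crashkernel offset; with it, the hole lookup uses the same
exclude-ranges logic as the kdump case.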
Signed-off-by: Hari Bathini Tested-by: Pingfan Liu Reviewed-by: Thiago Jung Bauermann Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/159602302326.575379.14038896654942043093.stgit@hbathini --- arch/powerpc/kexec/file_load_64.c | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c index f13c5b8399e1..c6a37ad5a0a4 100644 --- a/arch/powerpc/kexec/file_load_64.c +++ b/arch/powerpc/kexec/file_load_64.c @@ -1012,13 +1012,6 @@ int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf) u64 buf_min, buf_max; int ret; - /* - * Use the generic kexec_locate_mem_hole for regular - * kexec_file_load syscall - */ - if (kbuf->image->type != KEXEC_TYPE_CRASH) - return kexec_locate_mem_hole(kbuf); - /* Look up the exclude ranges list while locating the memory hole */ emem = &(kbuf->image->arch.exclude_ranges); if (!(*emem) || ((*emem)->nr_ranges == 0)) { @@ -1026,11 +1019,15 @@ int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf) return kexec_locate_mem_hole(kbuf); } + buf_min = kbuf->buf_min; + buf_max = kbuf->buf_max; /* Segments for kdump kernel should be within crashkernel region */ - buf_min = (kbuf->buf_min < crashk_res.start ? - crashk_res.start : kbuf->buf_min); - buf_max = (kbuf->buf_max > crashk_res.end ? - crashk_res.end : kbuf->buf_max); + if (kbuf->image->type == KEXEC_TYPE_CRASH) { + buf_min = (buf_min < crashk_res.start ? + crashk_res.start : buf_min); + buf_max = (buf_max > crashk_res.end ? + crashk_res.end : buf_max); + } if (buf_min > buf_max) { pr_err("Invalid buffer min and/or max values\n"); @@ -1067,15 +1064,13 @@ int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf) int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len) { - if (image->type == KEXEC_TYPE_CRASH) { - int ret; + int ret; - /* Get exclude memory ranges needed for setting up kdump segments */ - ret = get_exclude_memory_ranges(&(image->arch.exclude_ranges)); - if (ret) { - pr_err("Failed to setup exclude memory ranges for buffer lookup\n"); - return ret; - } + /* Get exclude memory ranges needed for setting up kexec segments */ + ret = get_exclude_memory_ranges(&(image->arch.exclude_ranges)); + if (ret) { + pr_err("Failed to setup exclude memory ranges for buffer lookup\n"); + return ret; } return kexec_image_probe_default(image, buf, buf_len); -- cgit v1.2.3 From 2e6bd221d96fcfd9bd1eed5cd9c008e7959daed7 Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Wed, 29 Jul 2020 17:14:06 +0530 Subject: powerpc/kexec_file: Enable early kernel OPAL calls Kernels built with CONFIG_PPC_EARLY_DEBUG_OPAL enabled expects r8 & r9 to be filled with OPAL base & entry addresses respectively. Setting these registers allows the kernel to perform OPAL calls before the device tree is parsed. 
Signed-off-by: Hari Bathini Reviewed-by: Thiago Jung Bauermann Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/159602303975.575379.5032301944162937479.stgit@hbathini --- arch/powerpc/kexec/file_load_64.c | 20 ++++++++++++++++++++ arch/powerpc/purgatory/trampoline_64.S | 16 ++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c index c6a37ad5a0a4..53bb71e3a2e1 100644 --- a/arch/powerpc/kexec/file_load_64.c +++ b/arch/powerpc/kexec/file_load_64.c @@ -876,6 +876,7 @@ int setup_purgatory_ppc64(struct kimage *image, const void *slave_code, const void *fdt, unsigned long kernel_load_addr, unsigned long fdt_load_addr) { + struct device_node *dn = NULL; int ret; ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr, @@ -903,9 +904,28 @@ int setup_purgatory_ppc64(struct kimage *image, const void *slave_code, &image->arch.backup_start, sizeof(image->arch.backup_start), false); + if (ret) + goto out; + + /* Setup OPAL base & entry values */ + dn = of_find_node_by_path("/ibm,opal"); + if (dn) { + u64 val; + + of_property_read_u64(dn, "opal-base-address", &val); + ret = kexec_purgatory_get_set_symbol(image, "opal_base", &val, + sizeof(val), false); + if (ret) + goto out; + + of_property_read_u64(dn, "opal-entry-address", &val); + ret = kexec_purgatory_get_set_symbol(image, "opal_entry", &val, + sizeof(val), false); + } out: if (ret) pr_err("Failed to setup purgatory symbols"); + of_node_put(dn); return ret; } diff --git a/arch/powerpc/purgatory/trampoline_64.S b/arch/powerpc/purgatory/trampoline_64.S index b567da56dde1..d956b8a35fd1 100644 --- a/arch/powerpc/purgatory/trampoline_64.S +++ b/arch/powerpc/purgatory/trampoline_64.S @@ -87,6 +87,10 @@ master: li %r4,28 STWX_BE %r17,%r3,%r4 /* Store my cpu as __be32 at byte 28 */ 1: + /* Load opal base and entry values in r8 & r9 respectively */ + ld %r8,(opal_base - 0b)(%r18) + ld %r9,(opal_entry - 0b)(%r18) + /* load the kernel address */ ld %r4,(kernel - 0b)(%r18) @@ -133,6 +137,18 @@ backup_start: .8byte 0x0 .size backup_start, . - backup_start + .balign 8 + .globl opal_base +opal_base: + .8byte 0x0 + .size opal_base, . - opal_base + + .balign 8 + .globl opal_entry +opal_entry: + .8byte 0x0 + .size opal_entry, . - opal_entry + .data .balign 8 .globl purgatory_sha256_digest -- cgit v1.2.3 From aff779515a070df7e23da9e86f1096f7d10d647e Mon Sep 17 00:00:00 2001 From: Vladis Dronov Date: Wed, 29 Jul 2020 15:37:41 +0200 Subject: powerpc: fix function annotations to avoid section mismatch warnings with gcc-10 Certain warnings are emitted for powerpc code when building with a gcc-10 toolset: WARNING: modpost: vmlinux.o(.text.unlikely+0x377c): Section mismatch in reference from the function remove_pmd_table() to the function .meminit.text:split_kernel_mapping() The function remove_pmd_table() references the function __meminit split_kernel_mapping(). This is often because remove_pmd_table lacks a __meminit annotation or the annotation of split_kernel_mapping is wrong. Add the appropriate __init and __meminit annotations to make modpost not complain. 
In all the cases there are just a single callsite from another __init or __meminit function: __meminit remove_pagetable() -> remove_pud_table() -> remove_pmd_table() __init prom_init() -> setup_secure_guest() __init xive_spapr_init() -> xive_spapr_disabled() Signed-off-by: Vladis Dronov Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200729133741.62789-1-vdronov@redhat.com --- arch/powerpc/kernel/prom_init.c | 4 ++-- arch/powerpc/mm/book3s64/radix_pgtable.c | 4 ++-- arch/powerpc/sysdev/xive/spapr.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index f279a1f58fa7..ae7ec9903191 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -3270,7 +3270,7 @@ static int enter_secure_mode(unsigned long kbase, unsigned long fdt) /* * Call the Ultravisor to transfer us to secure memory if we have an ESM blob. */ -static void setup_secure_guest(unsigned long kbase, unsigned long fdt) +static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt) { int ret; @@ -3300,7 +3300,7 @@ static void setup_secure_guest(unsigned long kbase, unsigned long fdt) } } #else -static void setup_secure_guest(unsigned long kbase, unsigned long fdt) +static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt) { } #endif /* CONFIG_PPC_SVM */ diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index a8f46a686219..28c784976bed 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -823,7 +823,7 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr, } } -static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr, +static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end) { unsigned long next; @@ -853,7 +853,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr, } } -static void remove_pud_table(pud_t *pud_start, unsigned long addr, +static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end) { unsigned long next; diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index f0551a2be9df..1e3674d7ea7b 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -768,7 +768,7 @@ static const u8 *get_vec5_feature(unsigned int index) return vec5 + index; } -static bool xive_spapr_disabled(void) +static bool __init xive_spapr_disabled(void) { const u8 *vec5_xive; -- cgit v1.2.3 From e2b3c165f27a6bdb197b0dc86683ed36f61c5527 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Thu, 27 Jun 2019 00:15:34 -0500 Subject: powerpc/cacheinfo: Set pr_fmt() Set pr_fmt() so we get a nice prefix on messages. 
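For illustration only (this snippet is not part of the patch), pr_fmt() is picked up
by the pr_*() macros at compile time, so every message in the file gets the prefix
without touching individual call sites; the example() function below is hypothetical:

  /* Must be defined before printk.h (or any header pulling it in) is included */
  #define pr_fmt(fmt) "cacheinfo: " fmt

  #include <linux/printk.h>

  static void example(void)
  {
  	/* Expands to printk(KERN_INFO "cacheinfo: " "creating L2 ucache\n") */
  	pr_info("creating L2 ucache\n");
  }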
Signed-off-by: Nathan Lynch Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20190627051537.7298-2-nathanl@linux.ibm.com --- arch/powerpc/kernel/cacheinfo.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c index d8d4552af30a..751a4ffcd588 100644 --- a/arch/powerpc/kernel/cacheinfo.c +++ b/arch/powerpc/kernel/cacheinfo.c @@ -7,6 +7,8 @@ * Author: Nathan Lynch */ +#define pr_fmt(fmt) "cacheinfo: " fmt + #include #include #include -- cgit v1.2.3 From be6f885e97e9304541057fbf25148685847ef310 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Thu, 27 Jun 2019 00:15:35 -0500 Subject: powerpc/cacheinfo: Use name@unit instead of full DT path in debug messages We know that every OF node we deal with in this code is under /cpus, so we can make the debug messages a little less verbose without losing information. E.g. cacheinfo: creating L1 dcache and icache for /cpus/PowerPC,POWER8@0 cacheinfo: creating L2 ucache for /cpus/l2-cache@2006 cacheinfo: creating L3 ucache for /cpus/l3-cache@3106 becomes cacheinfo: creating L1 dcache and icache for PowerPC,POWER8@0 cacheinfo: creating L2 ucache for l2-cache@2006 cacheinfo: creating L3 ucache for l3-cache@3106 Replace all '%pOF' specifiers with '%pOFP'. Signed-off-by: Nathan Lynch Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20190627051537.7298-3-nathanl@linux.ibm.com --- arch/powerpc/kernel/cacheinfo.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c index 751a4ffcd588..879154c839f3 100644 --- a/arch/powerpc/kernel/cacheinfo.c +++ b/arch/powerpc/kernel/cacheinfo.c @@ -168,7 +168,7 @@ static void release_cache_debugcheck(struct cache *cache) list_for_each_entry(iter, &cache_list, list) WARN_ONCE(iter->next_local == cache, - "cache for %pOF(%s) refers to cache for %pOF(%s)\n", + "cache for %pOFP(%s) refers to cache for %pOFP(%s)\n", iter->ofnode, cache_type_string(iter), cache->ofnode, @@ -180,7 +180,7 @@ static void release_cache(struct cache *cache) if (!cache) return; - pr_debug("freeing L%d %s cache for %pOF\n", cache->level, + pr_debug("freeing L%d %s cache for %pOFP\n", cache->level, cache_type_string(cache), cache->ofnode); release_cache_debugcheck(cache); @@ -195,7 +195,7 @@ static void cache_cpu_set(struct cache *cache, int cpu) while (next) { WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map), - "CPU %i already accounted in %pOF(%s)\n", + "CPU %i already accounted in %pOFP(%s)\n", cpu, next->ofnode, cache_type_string(next)); cpumask_set_cpu(cpu, &next->shared_cpu_map); @@ -354,7 +354,7 @@ static int cache_is_unified_d(const struct device_node *np) static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level) { - pr_debug("creating L%d ucache for %pOF\n", level, node); + pr_debug("creating L%d ucache for %pOFP\n", level, node); return new_cache(cache_is_unified_d(node), level, node); } @@ -364,7 +364,7 @@ static struct cache *cache_do_one_devnode_split(struct device_node *node, { struct cache *dcache, *icache; - pr_debug("creating L%d dcache and icache for %pOF\n", level, + pr_debug("creating L%d dcache and icache for %pOFP\n", level, node); dcache = new_cache(CACHE_TYPE_DATA, level, node); @@ -746,13 +746,13 @@ static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir) rc = attr->show(&dir->kobj, attr, buf); if (rc <= 0) { pr_debug("not creating %s attribute for " - "%pOF(%s) (rc = %zd)\n", + "%pOFP(%s) (rc 
= %zd)\n", attr->attr.name, cache->ofnode, cache_type, rc); continue; } if (sysfs_create_file(&dir->kobj, &attr->attr)) - pr_debug("could not create %s attribute for %pOF(%s)\n", + pr_debug("could not create %s attribute for %pOFP(%s)\n", attr->attr.name, cache->ofnode, cache_type); } @@ -868,7 +868,7 @@ static void cache_cpu_clear(struct cache *cache, int cpu) struct cache *next = cache->next_local; WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map), - "CPU %i not accounted in %pOF(%s)\n", + "CPU %i not accounted in %pOFP(%s)\n", cpu, cache->ofnode, cache_type_string(cache)); -- cgit v1.2.3 From 1b3da8ffaa158e9a95c19b17c14d7259d58bc0cd Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Thu, 27 Jun 2019 00:15:36 -0500 Subject: powerpc/cacheinfo: Improve diagnostics about malformed cache lists If we have a bug which causes us to start with the wrong kind of OF node when linking up the cache tree, it's helpful for debugging to print information about what we found vs what we expected. So replace uses of WARN_ON_ONCE with WARN_ONCE, which lets us include an informative message instead of a contentless backtrace. Signed-off-by: Nathan Lynch Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20190627051537.7298-4-nathanl@linux.ibm.com --- arch/powerpc/kernel/cacheinfo.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c index 879154c839f3..a116d5bb6b12 100644 --- a/arch/powerpc/kernel/cacheinfo.c +++ b/arch/powerpc/kernel/cacheinfo.c @@ -424,8 +424,14 @@ static void link_cache_lists(struct cache *smaller, struct cache *bigger) static void do_subsidiary_caches_debugcheck(struct cache *cache) { - WARN_ON_ONCE(cache->level != 1); - WARN_ON_ONCE(!of_node_is_type(cache->ofnode, "cpu")); + WARN_ONCE(cache->level != 1, + "instantiating cache chain from L%d %s cache for " + "%pOFP instead of an L1\n", cache->level, + cache_type_string(cache), cache->ofnode); + WARN_ONCE(!of_node_is_type(cache->ofnode, "cpu"), + "instantiating cache chain from node %pOFP of type '%s' " + "instead of a cpu node\n", cache->ofnode, + of_node_get_device_type(cache->ofnode)); } static void do_subsidiary_caches(struct cache *cache) -- cgit v1.2.3 From 6ec54363f198aae9c1343f82ff5b865546944a73 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Thu, 27 Jun 2019 00:15:37 -0500 Subject: powerpc/cacheinfo: Warn if cache object chain becomes unordered This can catch cases where the device tree has gotten mishandled into an inconsistent state at runtime, e.g. the cache nodes for both the source and the destination are present after a migration. Signed-off-by: Nathan Lynch Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20190627051537.7298-5-nathanl@linux.ibm.com --- arch/powerpc/kernel/cacheinfo.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c index a116d5bb6b12..65ab9fcebd31 100644 --- a/arch/powerpc/kernel/cacheinfo.c +++ b/arch/powerpc/kernel/cacheinfo.c @@ -420,6 +420,15 @@ static void link_cache_lists(struct cache *smaller, struct cache *bigger) } smaller->next_local = bigger; + + /* + * The cache->next_local list sorts by level ascending: + * L1d -> L1i -> L2 -> L3 ... 
+ */ + WARN_ONCE((smaller->level == 1 && bigger->level > 2) || + (smaller->level > 1 && bigger->level != smaller->level + 1), + "linking L%i cache %pOFP to L%i cache %pOFP; skipped a level?\n", + smaller->level, smaller->ofnode, bigger->level, bigger->ofnode); } static void do_subsidiary_caches_debugcheck(struct cache *cache) -- cgit v1.2.3 From 494a66f34e00b6a1897b5a1ab150a19265696b17 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Thu, 27 Jun 2019 00:30:43 -0500 Subject: powerpc/pseries/mobility: Set pr_fmt() The pr_err() callsites in mobility.c already manually include a "mobility:" prefix, let's make it official for the benefit of messages to be added later. Signed-off-by: Nathan Lynch Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20190627053044.9238-2-nathanl@linux.ibm.com --- arch/powerpc/platforms/pseries/mobility.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 78cd772a579b..0ede49a35121 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -6,6 +6,9 @@ * Copyright (C) 2010 IBM Corporation */ + +#define pr_fmt(fmt) "mobility: " fmt + #include #include #include @@ -396,11 +399,11 @@ static int __init mobility_sysfs_init(void) rc = sysfs_create_file(mobility_kobj, &class_attr_migration.attr); if (rc) - pr_err("mobility: unable to create migration sysfs file (%d)\n", rc); + pr_err("unable to create migration sysfs file (%d)\n", rc); rc = sysfs_create_file(mobility_kobj, &class_attr_api_version.attr.attr); if (rc) - pr_err("mobility: unable to create api_version sysfs file (%d)\n", rc); + pr_err("unable to create api_version sysfs file (%d)\n", rc); return 0; } -- cgit v1.2.3 From 5d8b1f9dea17b4bf5e5f088f39eeab32c7e487be Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Thu, 27 Jun 2019 00:30:44 -0500 Subject: powerpc/pseries/mobility: Add pr_debug() for device tree changes When investigating issues with partition migration or resource reassignments it is helpful to have a log of which nodes and properties in the device tree have changed. Use pr_debug() so it's easy to enable these at runtime with the dynamic debug facility. 
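As a usage note (not part of the patch), these pr_debug() sites are compiled in but
silent by default when CONFIG_DYNAMIC_DEBUG=y; they can be switched on at runtime by
writing a query such as "file mobility.c +p" to the dynamic debug control file. A
small helper doing that is sketched below, assuming debugfs is mounted at
/sys/kernel/debug:

  #include <stdio.h>

  int main(void)
  {
  	FILE *f = fopen("/sys/kernel/debug/dynamic_debug/control", "w");

  	if (!f)
  		return 1;
  	/* Enable every pr_debug() call site in mobility.c */
  	fputs("file mobility.c +p\n", f);
  	return fclose(f) ? 1 : 0;
  }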
Signed-off-by: Nathan Lynch Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20190627053044.9238-3-nathanl@linux.ibm.com --- arch/powerpc/platforms/pseries/mobility.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 0ede49a35121..d6f4162478a5 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -67,6 +67,8 @@ static int delete_dt_node(__be32 phandle) if (!dn) return -ENOENT; + pr_debug("removing node %pOFfp\n", dn); + dlpar_detach_node(dn); of_node_put(dn); return 0; @@ -125,6 +127,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop, } if (!more) { + pr_debug("updating node %pOF property %s\n", dn, name); of_update_property(dn, new_prop); *prop = NULL; } @@ -243,6 +246,8 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index) if (rc) dlpar_free_cc_nodes(dn); + pr_debug("added node %pOFfp\n", dn); + of_node_put(parent_dn); return rc; } -- cgit v1.2.3 From a0ff72f9f5a780341e7ff5e9ba50a0dad5fa1980 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Thu, 19 Sep 2019 18:16:33 -0500 Subject: powerpc/pseries/hotplug-cpu: Remove double free in error path In the unlikely event that the device tree lacks a /cpus node, find_dlpar_cpus_to_add() oddly frees the cpu_drcs buffer it has been passed before returning an error. Its only caller also frees the buffer on error. Remove the less conventional kfree() of a caller-supplied buffer from find_dlpar_cpus_to_add(). Fixes: 90edf184b9b7 ("powerpc/pseries: Add CPU dlpar add functionality") Signed-off-by: Nathan Lynch Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20190919231633.1344-1-nathanl@linux.ibm.com --- arch/powerpc/platforms/pseries/hotplug-cpu.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 4bad7a83addc..c6e0d8abf75e 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -737,7 +737,6 @@ static int dlpar_cpu_add_by_count(u32 cpus_to_add) parent = of_find_node_by_path("/cpus"); if (!parent) { pr_warn("Could not find CPU root node in device tree\n"); - kfree(cpu_drcs); return -1; } -- cgit v1.2.3 From d3a133aa0e029e0bbb67170f5f18c8fcd4701370 Mon Sep 17 00:00:00 2001 From: Michal Suchanek Date: Mon, 6 Apr 2020 23:00:22 +0200 Subject: powerpc/perf: Consolidate perf_callchain_user_[64|32]() perf_callchain_user_64() and perf_callchain_user_32() are nearly identical. Consolidate into one function with thin wrappers. 
Suggested-by: Nicholas Piggin Signed-off-by: Michal Suchanek [mpe: Adapt to copy_from_user_nofault(), minor formatting] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200406210022.32265-1-msuchanek@suse.de --- arch/powerpc/perf/callchain.h | 25 ++++++++++++++++++++++++- arch/powerpc/perf/callchain_32.c | 21 ++------------------- arch/powerpc/perf/callchain_64.c | 13 +++---------- 3 files changed, 29 insertions(+), 30 deletions(-) diff --git a/arch/powerpc/perf/callchain.h b/arch/powerpc/perf/callchain.h index 7a2cb9e1181a..ae24d4a00da6 100644 --- a/arch/powerpc/perf/callchain.h +++ b/arch/powerpc/perf/callchain.h @@ -2,7 +2,7 @@ #ifndef _POWERPC_PERF_CALLCHAIN_H #define _POWERPC_PERF_CALLCHAIN_H -int read_user_stack_slow(void __user *ptr, void *buf, int nb); +int read_user_stack_slow(const void __user *ptr, void *buf, int nb); void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs); void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry, @@ -16,4 +16,27 @@ static inline bool invalid_user_sp(unsigned long sp) return (!sp || (sp & mask) || (sp > top)); } +/* + * On 32-bit we just access the address and let hash_page create a + * HPTE if necessary, so there is no need to fall back to reading + * the page tables. Since this is called at interrupt level, + * do_page_fault() won't treat a DSI as a page fault. + */ +static inline int __read_user_stack(const void __user *ptr, void *ret, + size_t size) +{ + unsigned long addr = (unsigned long)ptr; + int rc; + + if (addr > TASK_SIZE - size || (addr & (size - 1))) + return -EFAULT; + + rc = copy_from_user_nofault(ret, ptr, size); + + if (IS_ENABLED(CONFIG_PPC64) && rc) + return read_user_stack_slow(ptr, ret, size); + + return rc; +} + #endif /* _POWERPC_PERF_CALLCHAIN_H */ diff --git a/arch/powerpc/perf/callchain_32.c b/arch/powerpc/perf/callchain_32.c index 542e68b8eae0..64e4013d8060 100644 --- a/arch/powerpc/perf/callchain_32.c +++ b/arch/powerpc/perf/callchain_32.c @@ -30,26 +30,9 @@ #endif /* CONFIG_PPC64 */ -/* - * On 32-bit we just access the address and let hash_page create a - * HPTE if necessary, so there is no need to fall back to reading - * the page tables. Since this is called at interrupt level, - * do_page_fault() won't treat a DSI as a page fault. - */ -static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) +static int read_user_stack_32(const unsigned int __user *ptr, unsigned int *ret) { - int rc; - - if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) || - ((unsigned long)ptr & 3)) - return -EFAULT; - - rc = copy_from_user_nofault(ret, ptr, sizeof(*ret)); - - if (IS_ENABLED(CONFIG_PPC64) && rc) - return read_user_stack_slow(ptr, ret, 4); - - return rc; + return __read_user_stack(ptr, ret, sizeof(*ret)); } /* diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c index fa2a1b83b9b0..fed90e827f3a 100644 --- a/arch/powerpc/perf/callchain_64.c +++ b/arch/powerpc/perf/callchain_64.c @@ -23,7 +23,7 @@ * interrupt context, so if the access faults, we read the page tables * to find which page (if any) is mapped and access it directly. 
*/ -int read_user_stack_slow(void __user *ptr, void *buf, int nb) +int read_user_stack_slow(const void __user *ptr, void *buf, int nb) { unsigned long addr = (unsigned long) ptr; @@ -44,16 +44,9 @@ int read_user_stack_slow(void __user *ptr, void *buf, int nb) return -EFAULT; } -static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret) +static int read_user_stack_64(const unsigned long __user *ptr, unsigned long *ret) { - if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) || - ((unsigned long)ptr & 7)) - return -EFAULT; - - if (!copy_from_user_nofault(ret, ptr, sizeof(*ret))) - return 0; - - return read_user_stack_slow(ptr, ret, 8); + return __read_user_stack(ptr, ret, sizeof(*ret)); } /* -- cgit v1.2.3 From dfa03fff86027e58c8dba5c03ae68150d4e513ad Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Thu, 30 Jul 2020 10:38:46 +0530 Subject: selftests/powerpc: Fix online CPU selection The size of the CPU affinity mask must be large enough for systems with a very large number of CPUs. Otherwise, tests which try to determine the first online CPU by calling sched_getaffinity() will fail. This makes sure that the size of the allocated affinity mask is dependent on the number of CPUs as reported by get_nprocs_conf(). Fixes: 3752e453f6ba ("selftests/powerpc: Add tests of PMU EBBs") Reported-by: Shirisha Ganta Signed-off-by: Sandipan Das Reviewed-by: Kamalesh Babulal Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/a408c4b8e9a23bb39b539417a21eb0ff47bb5127.1596084858.git.sandipan@linux.ibm.com --- tools/testing/selftests/powerpc/utils.c | 37 ++++++++++++++++++++++----------- 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c index 933678f1ed0a..18b6a773d5c7 100644 --- a/tools/testing/selftests/powerpc/utils.c +++ b/tools/testing/selftests/powerpc/utils.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -88,28 +89,40 @@ void *get_auxv_entry(int type) int pick_online_cpu(void) { - cpu_set_t mask; - int cpu; + int ncpus, cpu = -1; + cpu_set_t *mask; + size_t size; + + ncpus = get_nprocs_conf(); + size = CPU_ALLOC_SIZE(ncpus); + mask = CPU_ALLOC(ncpus); + if (!mask) { + perror("malloc"); + return -1; + } - CPU_ZERO(&mask); + CPU_ZERO_S(size, mask); - if (sched_getaffinity(0, sizeof(mask), &mask)) { + if (sched_getaffinity(0, size, mask)) { perror("sched_getaffinity"); - return -1; + goto done; } /* We prefer a primary thread, but skip 0 */ - for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8) - if (CPU_ISSET(cpu, &mask)) - return cpu; + for (cpu = 8; cpu < ncpus; cpu += 8) + if (CPU_ISSET_S(cpu, size, mask)) + goto done; /* Search for anything, but in reverse */ - for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--) - if (CPU_ISSET(cpu, &mask)) - return cpu; + for (cpu = ncpus - 1; cpu >= 0; cpu--) + if (CPU_ISSET_S(cpu, size, mask)) + goto done; printf("No cpus in affinity mask?!\n"); - return -1; + +done: + CPU_FREE(mask); + return cpu; } bool is_ppc64le(void) -- cgit v1.2.3 From 3af0ada7dd98c6da35c1fd7f107af3b9aa5e904c Mon Sep 17 00:00:00 2001 From: "Gautham R. Shenoy" Date: Thu, 30 Jul 2020 11:02:55 +0530 Subject: cpuidle: pseries: Set the latency-hint before entering CEDE As per the PAPR, each H_CEDE call is associated with a latency-hint to be passed in the VPA field "cede_latency_hint". The CEDE states that we were implicitly entering so far is CEDE with latency-hint = 0. 
This patch explicitly sets the latency hint corresponding to the CEDE state that we are currently entering. While at it, we save the previous hint, to be restored once we wakeup from CEDE. This will be required in the future when we expose extended-cede states through the cpuidle framework, where each of them will have a different cede-latency hint. Signed-off-by: Gautham R. Shenoy [mpe: Make cede_latency_hint static] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1596087177-30329-2-git-send-email-ego@linux.vnet.ibm.com --- drivers/cpuidle/cpuidle-pseries.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c index 3e058ad2bb51..31c8e51162b2 100644 --- a/drivers/cpuidle/cpuidle-pseries.c +++ b/drivers/cpuidle/cpuidle-pseries.c @@ -86,19 +86,27 @@ static void check_and_cede_processor(void) } } +#define NR_DEDICATED_STATES 2 /* snooze, CEDE */ + +static u8 cede_latency_hint[NR_DEDICATED_STATES]; + static int dedicated_cede_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { + u8 old_latency_hint; pseries_idle_prolog(); get_lppaca()->donate_dedicated_cpu = 1; + old_latency_hint = get_lppaca()->cede_latency_hint; + get_lppaca()->cede_latency_hint = cede_latency_hint[index]; HMT_medium(); check_and_cede_processor(); local_irq_disable(); get_lppaca()->donate_dedicated_cpu = 0; + get_lppaca()->cede_latency_hint = old_latency_hint; pseries_idle_epilog(); @@ -130,7 +138,7 @@ static int shared_cede_loop(struct cpuidle_device *dev, /* * States for dedicated partition case. */ -static struct cpuidle_state dedicated_states[] = { +static struct cpuidle_state dedicated_states[NR_DEDICATED_STATES] = { { /* Snooze */ .name = "snooze", .desc = "snooze", @@ -233,7 +241,7 @@ static int pseries_idle_probe(void) max_idle_state = ARRAY_SIZE(shared_states); } else { cpuidle_state_table = dedicated_states; - max_idle_state = ARRAY_SIZE(dedicated_states); + max_idle_state = NR_DEDICATED_STATES; } } else return -ENODEV; -- cgit v1.2.3 From 054e44ba99ae36918631fcbf5f034e466c2f1b73 Mon Sep 17 00:00:00 2001 From: "Gautham R. Shenoy" Date: Thu, 30 Jul 2020 11:02:56 +0530 Subject: cpuidle: pseries: Add function to parse extended CEDE records Currently we use CEDE with latency-hint 0 as the only other idle state on a dedicated LPAR apart from the polling "snooze" state. The platform might support additional extended CEDE idle states, which can be discovered through the "ibm,get-system-parameter" rtas-call made with CEDE_LATENCY_TOKEN. This patch adds a function to obtain information about the extended CEDE idle states from the platform and parse the contents to populate an array of extended CEDE states. These idle states thus discovered will be added to the cpuidle framework in the next patch. 
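To make the buffer-layout arithmetic concrete: with the 10-byte records reported in
the dmesg output below, the returned payload consists of one record-size byte, the
records themselves and a trailing NUL, so for two records the payload size works out
to 22 bytes and the parser computes nr_xcede_records = (22 - 2) / 10 = 2.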
dmesg on a POWER8 and POWER9 LPAR, demonstrating the output of parsing the extended CEDE latency parameters, is as follows:

POWER8
[ 10.093279] xcede : xcede_record_size = 10
[ 10.093285] xcede : Record 0 : hint = 1, latency = 0x3c00 tb ticks, Wake-on-irq = 1
[ 10.093291] xcede : Record 1 : hint = 2, latency = 0x4e2000 tb ticks, Wake-on-irq = 0
[ 10.093297] cpuidle : Skipping the 2 Extended CEDE idle states

POWER9
[ 5.913180] xcede : xcede_record_size = 10
[ 5.913183] xcede : Record 0 : hint = 1, latency = 0x400 tb ticks, Wake-on-irq = 1
[ 5.913188] xcede : Record 1 : hint = 2, latency = 0x3e8000 tb ticks, Wake-on-irq = 0
[ 5.913193] cpuidle : Skipping the 2 Extended CEDE idle states

Signed-off-by: Gautham R. Shenoy
[mpe: Make space for 16 records, drop memset, minor cleanup & formatting]
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/1596087177-30329-3-git-send-email-ego@linux.vnet.ibm.com
---
 drivers/cpuidle/cpuidle-pseries.c | 136 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 135 insertions(+), 1 deletion(-)

diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 31c8e51162b2..abfc160c9837 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include

 static struct cpuidle_driver pseries_idle_driver = {
 	.name = "pseries_idle",
@@ -86,8 +87,131 @@ static void check_and_cede_processor(void)
 	}
 }

-#define NR_DEDICATED_STATES 2 /* snooze, CEDE */
+/*
+ * XCEDE: Extended CEDE states discovered through the
+ * "ibm,get-systems-parameter" RTAS call with the token
+ * CEDE_LATENCY_TOKEN
+ */
+
+/*
+ * Section 7.3.16 System Parameters Option of PAPR version 2.8.1 has a
+ * table with all the parameters to ibm,get-system-parameters.
+ * CEDE_LATENCY_TOKEN corresponds to the token value for Cede Latency
+ * Settings Information.
+ */
+#define CEDE_LATENCY_TOKEN 45
+
+/*
+ * If the platform supports the cede latency settings information system
+ * parameter it must provide the following information in the NULL terminated
+ * parameter string:
+ *
+ * a. The first byte is the length "N" of each cede latency setting record minus
+ * one (zero indicates a length of 1 byte).
+ *
+ * b. For each supported cede latency setting a cede latency setting record
+ * consisting of the first "N" bytes as per the following table.
+ *
+ * -----------------------------
+ * | Field           | Field  |
+ * | Name            | Length |
+ * -----------------------------
+ * | Cede Latency    | 1 Byte |
+ * | Specifier Value |        |
+ * -----------------------------
+ * | Maximum wakeup  |        |
+ * | latency in      | 8 Bytes|
+ * | tb-ticks        |        |
+ * -----------------------------
+ * | Responsive to   |        |
+ * | external        | 1 Byte |
+ * | interrupts      |        |
+ * -----------------------------
+ *
+ * This version has cede latency record size = 10.
+ *
+ * The structure xcede_latency_payload represents a) and b) with
+ * xcede_latency_record representing the table in b).
+ *
+ * xcede_latency_parameter is what gets returned by
+ * ibm,get-systems-parameter RTAS call when made with
+ * CEDE_LATENCY_TOKEN.
+ *
+ * These structures are only used to represent the data obtained by the RTAS
+ * call. The data is in big-endian.
+ */
+struct xcede_latency_record {
+	u8 hint;
+	__be64 latency_ticks;
+	u8 wake_on_irqs;
+} __packed;
+
+// Make space for 16 records, which "should be enough".
+struct xcede_latency_payload { + u8 record_size; + struct xcede_latency_record records[16]; +} __packed; + +struct xcede_latency_parameter { + __be16 payload_size; + struct xcede_latency_payload payload; + u8 null_char; +} __packed; + +static unsigned int nr_xcede_records; +static struct xcede_latency_parameter xcede_latency_parameter __initdata; + +static int __init parse_cede_parameters(void) +{ + struct xcede_latency_payload *payload; + u32 total_xcede_records_size; + u8 xcede_record_size; + u16 payload_size; + int ret, i; + + ret = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, + NULL, CEDE_LATENCY_TOKEN, __pa(&xcede_latency_parameter), + sizeof(xcede_latency_parameter)); + if (ret) { + pr_err("xcede: Error parsing CEDE_LATENCY_TOKEN\n"); + return ret; + } + + payload_size = be16_to_cpu(xcede_latency_parameter.payload_size); + payload = &xcede_latency_parameter.payload; + xcede_record_size = payload->record_size + 1; + + if (xcede_record_size != sizeof(struct xcede_latency_record)) { + pr_err("xcede: Expected record-size %lu. Observed size %u.\n", + sizeof(struct xcede_latency_record), xcede_record_size); + return -EINVAL; + } + + pr_info("xcede: xcede_record_size = %d\n", xcede_record_size); + + /* + * Since the payload_size includes the last NULL byte and the + * xcede_record_size, the remaining bytes correspond to array of all + * cede_latency settings. + */ + total_xcede_records_size = payload_size - 2; + nr_xcede_records = total_xcede_records_size / xcede_record_size; + + for (i = 0; i < nr_xcede_records; i++) { + struct xcede_latency_record *record = &payload->records[i]; + u64 latency_ticks = be64_to_cpu(record->latency_ticks); + u8 wake_on_irqs = record->wake_on_irqs; + u8 hint = record->hint; + + pr_info("xcede: Record %d : hint = %u, latency = 0x%llx tb ticks, Wake-on-irq = %u\n", + i, hint, latency_ticks, wake_on_irqs); + } + + return 0; +} + +#define NR_DEDICATED_STATES 2 /* snooze, CEDE */ static u8 cede_latency_hint[NR_DEDICATED_STATES]; static int dedicated_cede_loop(struct cpuidle_device *dev, @@ -219,6 +343,15 @@ static int pseries_cpuidle_driver_init(void) return 0; } +static void __init parse_xcede_idle_states(void) +{ + if (parse_cede_parameters()) + return; + + pr_info("cpuidle : Skipping the %d Extended CEDE idle states\n", + nr_xcede_records); +} + /* * pseries_idle_probe() * Choose state table for shared versus dedicated partition @@ -240,6 +373,7 @@ static int pseries_idle_probe(void) cpuidle_state_table = shared_states; max_idle_state = ARRAY_SIZE(shared_states); } else { + parse_xcede_idle_states(); cpuidle_state_table = dedicated_states; max_idle_state = NR_DEDICATED_STATES; } -- cgit v1.2.3 From d947fb4c965cdb7242f3f91124ea16079c49fa8b Mon Sep 17 00:00:00 2001 From: "Gautham R. Shenoy" Date: Thu, 30 Jul 2020 11:02:57 +0530 Subject: cpuidle: pseries: Fixup exit latency for CEDE(0) We are currently assuming that CEDE(0) has exit latency 10us, since there is no way for us to query from the platform. However, if the wakeup latency of an Extended CEDE state is smaller than 10us, then we can be sure that the exit latency of CEDE(0) cannot be more than that. In this patch, we fix the exit latency of CEDE(0) if we discover an Extended CEDE state with wakeup latency smaller than 10us. Benchmark results: On POWER8, this patch does not have any impact since the advertized latency of Extended CEDE (1) is 30us which is higher than the default latency of CEDE (0) which is 10us. 
On POWER9 we see improvement the single-threaded performance of ebizzy, and no regression in the wakeup latency or the number of context-switches. ebizzy: 2 ebizzy threads bound to the same big-core. 25% improvement in the avg records/s with patch. x without_patch * with_patch N Min Max Median Avg Stddev x 10 2491089 5834307 5398375 4244335 1596244.9 * 10 2893813 5834474 5832448 5327281.3 1055941.4 context_switch2: There is no major regression observed with this patch as seen from the context_switch2 benchmark. context_switch2 across CPU0 CPU1 (Both belong to same big-core, but different small cores). We observe a minor 0.14% regression in the number of context-switches (higher is better). x without_patch * with_patch N Min Max Median Avg Stddev x 500 348872 362236 354712 354745.69 2711.827 * 500 349422 361452 353942 354215.4 2576.9258 Difference at 99.0% confidence -530.288 +/- 430.963 -0.149484% +/- 0.121485% (Student's t, pooled s = 2645.24) context_switch2 across CPU0 CPU8 (Different big-cores). We observe a 0.37% improvement in the number of context-switches (higher is better). x without_patch * with_patch N Min Max Median Avg Stddev x 500 287956 294940 288896 288977.23 646.59295 * 500 288300 294646 289582 290064.76 1161.9992 Difference at 99.0% confidence 1087.53 +/- 153.194 0.376337% +/- 0.0530125% (Student's t, pooled s = 940.299) schbench: No major difference could be seen until the 99.9th percentile. Without-patch: Latency percentiles (usec) 50.0th: 29 75.0th: 39 90.0th: 49 95.0th: 59 *99.0th: 13104 99.5th: 14672 99.9th: 15824 min=0, max=17993 With-patch: Latency percentiles (usec) 50.0th: 29 75.0th: 40 90.0th: 50 95.0th: 61 *99.0th: 13648 99.5th: 14768 99.9th: 15664 min=0, max=29812 Signed-off-by: Gautham R. Shenoy [mpe: Minor formatting] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1596087177-30329-4-git-send-email-ego@linux.vnet.ibm.com --- drivers/cpuidle/cpuidle-pseries.c | 45 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 42 insertions(+), 3 deletions(-) diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c index abfc160c9837..ff6d99e923a4 100644 --- a/drivers/cpuidle/cpuidle-pseries.c +++ b/drivers/cpuidle/cpuidle-pseries.c @@ -343,13 +343,52 @@ static int pseries_cpuidle_driver_init(void) return 0; } -static void __init parse_xcede_idle_states(void) +static void __init fixup_cede0_latency(void) { + struct xcede_latency_payload *payload; + u64 min_latency_us; + int i; + + min_latency_us = dedicated_states[1].exit_latency; // CEDE latency + if (parse_cede_parameters()) return; - pr_info("cpuidle : Skipping the %d Extended CEDE idle states\n", + pr_info("cpuidle: Skipping the %d Extended CEDE idle states\n", nr_xcede_records); + + payload = &xcede_latency_parameter.payload; + for (i = 0; i < nr_xcede_records; i++) { + struct xcede_latency_record *record = &payload->records[i]; + u64 latency_tb = be64_to_cpu(record->latency_ticks); + u64 latency_us = tb_to_ns(latency_tb) / NSEC_PER_USEC; + + if (latency_us < min_latency_us) + min_latency_us = latency_us; + } + + /* + * By default, we assume that CEDE(0) has exit latency 10us, + * since there is no way for us to query from the platform. + * + * However, if the wakeup latency of an Extended CEDE state is + * smaller than 10us, then we can be sure that CEDE(0) + * requires no more than that. + * + * Perform the fix-up. 
+ */ + if (min_latency_us < dedicated_states[1].exit_latency) { + u64 cede0_latency = min_latency_us - 1; + + if (cede0_latency <= 0) + cede0_latency = min_latency_us; + + dedicated_states[1].exit_latency = cede0_latency; + dedicated_states[1].target_residency = 10 * (cede0_latency); + pr_info("cpuidle: Fixed up CEDE exit latency to %llu us\n", + cede0_latency); + } + } /* @@ -373,7 +412,7 @@ static int pseries_idle_probe(void) cpuidle_state_table = shared_states; max_idle_state = ARRAY_SIZE(shared_states); } else { - parse_xcede_idle_states(); + fixup_cede0_latency(); cpuidle_state_table = dedicated_states; max_idle_state = NR_DEDICATED_STATES; } -- cgit v1.2.3 From 2d02bf835e5731de632c8a13567905fa7c0da01c Mon Sep 17 00:00:00 2001 From: Vaibhav Jain Date: Fri, 31 Jul 2020 12:11:52 +0530 Subject: powerpc/papr_scm: Fetch nvdimm performance stats from PHYP Update papr_scm.c to query dimm performance statistics from PHYP via H_SCM_PERFORMANCE_STATS hcall and export them to user-space as PAPR specific NVDIMM attribute 'perf_stats' in sysfs. The patch also provide a sysfs ABI documentation for the stats being reported and their meanings. During NVDIMM probe time in papr_scm_nvdimm_init() a special variant of H_SCM_PERFORMANCE_STATS hcall is issued to check if collection of performance statistics is supported or not. If successful then a PHYP returns a maximum possible buffer length needed to read all performance stats. This returned value is stored in a per-nvdimm attribute 'stat_buffer_len'. The layout of request buffer for reading NVDIMM performance stats from PHYP is defined in 'struct papr_scm_perf_stats' and 'struct papr_scm_perf_stat'. These structs are used in newly introduced drc_pmem_query_stats() that issues the H_SCM_PERFORMANCE_STATS hcall. The sysfs access function perf_stats_show() uses value 'stat_buffer_len' to allocate a buffer large enough to hold all possible NVDIMM performance stats and passes it to drc_pmem_query_stats() to populate. Finally statistics reported in the buffer are formatted into the sysfs access function output buffer. Signed-off-by: Vaibhav Jain Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200731064153.182203-2-vaibhav@linux.ibm.com --- Documentation/ABI/testing/sysfs-bus-papr-pmem | 27 +++++ arch/powerpc/platforms/pseries/papr_scm.c | 150 ++++++++++++++++++++++++++ 2 files changed, 177 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-bus-papr-pmem b/Documentation/ABI/testing/sysfs-bus-papr-pmem index 5b10d036a8d4..c1a67275c43f 100644 --- a/Documentation/ABI/testing/sysfs-bus-papr-pmem +++ b/Documentation/ABI/testing/sysfs-bus-papr-pmem @@ -25,3 +25,30 @@ Description: NVDIMM have been scrubbed. * "locked" : Indicating that NVDIMM contents cant be modified until next power cycle. + +What: /sys/bus/nd/devices/nmemX/papr/perf_stats +Date: May, 2020 +KernelVersion: v5.9 +Contact: linuxppc-dev , linux-nvdimm@lists.01.org, +Description: + (RO) Report various performance stats related to papr-scm NVDIMM + device. Each stat is reported on a new line with each line + composed of a stat-identifier followed by it value. 
Below are + currently known dimm performance stats which are reported: + + * "CtlResCt" : Controller Reset Count + * "CtlResTm" : Controller Reset Elapsed Time + * "PonSecs " : Power-on Seconds + * "MemLife " : Life Remaining + * "CritRscU" : Critical Resource Utilization + * "HostLCnt" : Host Load Count + * "HostSCnt" : Host Store Count + * "HostSDur" : Host Store Duration + * "HostLDur" : Host Load Duration + * "MedRCnt " : Media Read Count + * "MedWCnt " : Media Write Count + * "MedRDur " : Media Read Duration + * "MedWDur " : Media Write Duration + * "CchRHCnt" : Cache Read Hit Count + * "CchWHCnt" : Cache Write Hit Count + * "FastWCnt" : Fast Write Count \ No newline at end of file diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index 3d1235a76ba9..f37f3f70007d 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -64,6 +64,26 @@ PAPR_PMEM_HEALTH_FATAL | \ PAPR_PMEM_HEALTH_UNHEALTHY) +#define PAPR_SCM_PERF_STATS_EYECATCHER __stringify(SCMSTATS) +#define PAPR_SCM_PERF_STATS_VERSION 0x1 + +/* Struct holding a single performance metric */ +struct papr_scm_perf_stat { + u8 stat_id[8]; + __be64 stat_val; +} __packed; + +/* Struct exchanged between kernel and PHYP for fetching drc perf stats */ +struct papr_scm_perf_stats { + u8 eye_catcher[8]; + /* Should be PAPR_SCM_PERF_STATS_VERSION */ + __be32 stats_version; + /* Number of stats following */ + __be32 num_statistics; + /* zero or more performance matrics */ + struct papr_scm_perf_stat scm_statistic[]; +} __packed; + /* private struct associated with each region */ struct papr_scm_priv { struct platform_device *pdev; @@ -92,6 +112,9 @@ struct papr_scm_priv { /* Health information for the dimm */ u64 health_bitmap; + + /* length of the stat buffer as expected by phyp */ + size_t stat_buffer_len; }; static LIST_HEAD(papr_nd_regions); @@ -200,6 +223,79 @@ err_out: return drc_pmem_bind(p); } +/* + * Query the Dimm performance stats from PHYP and copy them (if returned) to + * provided struct papr_scm_perf_stats instance 'stats' that can hold atleast + * (num_stats + header) bytes. + * - If buff_stats == NULL the return value is the size in byes of the buffer + * needed to hold all supported performance-statistics. + * - If buff_stats != NULL and num_stats == 0 then we copy all known + * performance-statistics to 'buff_stat' and expect to be large enough to + * hold them. + * - if buff_stats != NULL and num_stats > 0 then copy the requested + * performance-statistics to buff_stats. + */ +static ssize_t drc_pmem_query_stats(struct papr_scm_priv *p, + struct papr_scm_perf_stats *buff_stats, + unsigned int num_stats) +{ + unsigned long ret[PLPAR_HCALL_BUFSIZE]; + size_t size; + s64 rc; + + /* Setup the out buffer */ + if (buff_stats) { + memcpy(buff_stats->eye_catcher, + PAPR_SCM_PERF_STATS_EYECATCHER, 8); + buff_stats->stats_version = + cpu_to_be32(PAPR_SCM_PERF_STATS_VERSION); + buff_stats->num_statistics = + cpu_to_be32(num_stats); + + /* + * Calculate the buffer size based on num-stats provided + * or use the prefetched max buffer length + */ + if (num_stats) + /* Calculate size from the num_stats */ + size = sizeof(struct papr_scm_perf_stats) + + num_stats * sizeof(struct papr_scm_perf_stat); + else + size = p->stat_buffer_len; + } else { + /* In case of no out buffer ignore the size */ + size = 0; + } + + /* Do the HCALL asking PHYP for info */ + rc = plpar_hcall(H_SCM_PERFORMANCE_STATS, ret, p->drc_index, + buff_stats ? 
virt_to_phys(buff_stats) : 0, + size); + + /* Check if the error was due to an unknown stat-id */ + if (rc == H_PARTIAL) { + dev_err(&p->pdev->dev, + "Unknown performance stats, Err:0x%016lX\n", ret[0]); + return -ENOENT; + } else if (rc != H_SUCCESS) { + dev_err(&p->pdev->dev, + "Failed to query performance stats, Err:%lld\n", rc); + return -EIO; + + } else if (!size) { + /* Handle case where stat buffer size was requested */ + dev_dbg(&p->pdev->dev, + "Performance stats size %ld\n", ret[0]); + return ret[0]; + } + + /* Successfully fetched the requested stats from phyp */ + dev_dbg(&p->pdev->dev, + "Performance stats returned %d stats\n", + be32_to_cpu(buff_stats->num_statistics)); + return 0; +} + /* * Issue hcall to retrieve dimm health info and populate papr_scm_priv with the * health information. @@ -637,6 +733,48 @@ static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc, return 0; } +static ssize_t perf_stats_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int index, rc; + struct seq_buf s; + struct papr_scm_perf_stat *stat; + struct papr_scm_perf_stats *stats; + struct nvdimm *dimm = to_nvdimm(dev); + struct papr_scm_priv *p = nvdimm_provider_data(dimm); + + if (!p->stat_buffer_len) + return -ENOENT; + + /* Allocate the buffer for phyp where stats are written */ + stats = kzalloc(p->stat_buffer_len, GFP_KERNEL); + if (!stats) + return -ENOMEM; + + /* Ask phyp to return all dimm perf stats */ + rc = drc_pmem_query_stats(p, stats, 0); + if (rc) + goto free_stats; + /* + * Go through the returned output buffer and print stats and + * values. Since stat_id is essentially a char string of + * 8 bytes, simply use the string format specifier to print it. + */ + seq_buf_init(&s, buf, PAGE_SIZE); + for (index = 0, stat = stats->scm_statistic; + index < be32_to_cpu(stats->num_statistics); + ++index, ++stat) { + seq_buf_printf(&s, "%.8s = 0x%016llX\n", + stat->stat_id, + be64_to_cpu(stat->stat_val)); + } + +free_stats: + kfree(stats); + return rc ? rc : seq_buf_used(&s); +} +DEVICE_ATTR_RO(perf_stats); + static ssize_t flags_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -682,6 +820,7 @@ DEVICE_ATTR_RO(flags); /* papr_scm specific dimm attributes */ static struct attribute *papr_nd_attributes[] = { &dev_attr_flags.attr, + &dev_attr_perf_stats.attr, NULL, }; @@ -702,6 +841,7 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p) struct nd_region_desc ndr_desc; unsigned long dimm_flags; int target_nid, online_nid; + ssize_t stat_size; p->bus_desc.ndctl = papr_scm_ndctl; p->bus_desc.module = THIS_MODULE; @@ -769,6 +909,16 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p) list_add_tail(&p->region_list, &papr_nd_regions); mutex_unlock(&papr_ndr_lock); + /* Try retriving the stat buffer and see if its supported */ + stat_size = drc_pmem_query_stats(p, NULL, 0); + if (stat_size > 0) { + p->stat_buffer_len = stat_size; + dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n", + p->stat_buffer_len); + } else { + dev_info(&p->pdev->dev, "Dimm performance stats unavailable\n"); + } + return 0; err: nvdimm_bus_unregister(p->bus); -- cgit v1.2.3 From af0870c4e75655b1931d0a5ffde2f448a2794362 Mon Sep 17 00:00:00 2001 From: Vaibhav Jain Date: Fri, 31 Jul 2020 12:11:53 +0530 Subject: powerpc/papr_scm: Add support for fetching nvdimm 'fuel-gauge' metric We add support for reporting 'fuel-gauge' NVDIMM metric via PAPR_PDSM_HEALTH pdsm payload. 'fuel-gauge' metric indicates the usage life remaining of a papr-scm compatible NVDIMM. 
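(Editorial aside, not part of the patch series: the perf_stats sysfs attribute added above is a plain text file of "<stat-id> = 0x<value>" lines, so userspace can consume it with ordinary string parsing. A minimal sketch, assuming the line format produced by perf_stats_show() and an example device name of nmem0:)

#include <stdio.h>
#include <string.h>

/*
 * Hedged sketch: read the "MemLife " statistic from the perf_stats
 * attribute documented above.  "nmem0" is only an example device name
 * and the "<stat-id> = 0x<value>" layout follows perf_stats_show().
 */
int read_mem_life(unsigned long long *life)
{
	char line[128];
	FILE *f = fopen("/sys/bus/nd/devices/nmem0/papr/perf_stats", "r");

	if (!f)
		return -1;

	while (fgets(line, sizeof(line), f)) {
		if (strncmp(line, "MemLife", 7) == 0 &&
		    sscanf(line, "%*s = %llx", life) == 1) {
			fclose(f);
			return 0;
		}
	}

	fclose(f);
	return -1;
}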
PHYP exposes this metric via the H_SCM_PERFORMANCE_STATS. The metric value is returned from the pdsm by extending the return payload 'struct nd_papr_pdsm_health' without breaking the ABI. A new field 'dimm_fuel_gauge' to hold the metric value is introduced at the end of the payload struct and its presence is indicated by by extension flag PDSM_DIMM_HEALTH_RUN_GAUGE_VALID. The patch introduces a new function papr_pdsm_fuel_gauge() that is called from papr_pdsm_health(). If fetching NVDIMM performance stats is supported then 'papr_pdsm_fuel_gauge()' allocated an output buffer large enough to hold the performance stat and passes it to drc_pmem_query_stats() that issues the HCALL to PHYP. The return value of the stat is then populated in the 'struct nd_papr_pdsm_health.dimm_fuel_gauge' field with extension flag 'PDSM_DIMM_HEALTH_RUN_GAUGE_VALID' set in 'struct nd_papr_pdsm_health.extension_flags' Signed-off-by: Vaibhav Jain Reviewed-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200731064153.182203-3-vaibhav@linux.ibm.com --- arch/powerpc/include/uapi/asm/papr_pdsm.h | 9 ++++++ arch/powerpc/platforms/pseries/papr_scm.c | 49 +++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) diff --git a/arch/powerpc/include/uapi/asm/papr_pdsm.h b/arch/powerpc/include/uapi/asm/papr_pdsm.h index 9ccecc1d6840..50ef95e2f5b1 100644 --- a/arch/powerpc/include/uapi/asm/papr_pdsm.h +++ b/arch/powerpc/include/uapi/asm/papr_pdsm.h @@ -72,6 +72,11 @@ #define PAPR_PDSM_DIMM_CRITICAL 2 #define PAPR_PDSM_DIMM_FATAL 3 +/* struct nd_papr_pdsm_health.extension_flags field flags */ + +/* Indicate that the 'dimm_fuel_gauge' field is valid */ +#define PDSM_DIMM_HEALTH_RUN_GAUGE_VALID 1 + /* * Struct exchanged between kernel & ndctl in for PAPR_PDSM_HEALTH * Various flags indicate the health status of the dimm. @@ -84,6 +89,7 @@ * dimm_locked : Contents of the dimm cant be modified until CEC reboot * dimm_encrypted : Contents of dimm are encrypted. * dimm_health : Dimm health indicator. 
One of PAPR_PDSM_DIMM_XXXX + * dimm_fuel_gauge : Life remaining of DIMM as a percentage from 0-100 */ struct nd_papr_pdsm_health { union { @@ -96,6 +102,9 @@ struct nd_papr_pdsm_health { __u8 dimm_locked; __u8 dimm_encrypted; __u16 dimm_health; + + /* Extension flag PDSM_DIMM_HEALTH_RUN_GAUGE_VALID */ + __u16 dimm_fuel_gauge; }; __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE]; }; diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index f37f3f70007d..f439f0dfea7d 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -518,6 +518,51 @@ static int is_cmd_valid(struct nvdimm *nvdimm, unsigned int cmd, void *buf, return 0; } +static int papr_pdsm_fuel_gauge(struct papr_scm_priv *p, + union nd_pdsm_payload *payload) +{ + int rc, size; + u64 statval; + struct papr_scm_perf_stat *stat; + struct papr_scm_perf_stats *stats; + + /* Silently fail if fetching performance metrics isn't supported */ + if (!p->stat_buffer_len) + return 0; + + /* Allocate request buffer enough to hold single performance stat */ + size = sizeof(struct papr_scm_perf_stats) + + sizeof(struct papr_scm_perf_stat); + + stats = kzalloc(size, GFP_KERNEL); + if (!stats) + return -ENOMEM; + + stat = &stats->scm_statistic[0]; + memcpy(&stat->stat_id, "MemLife ", sizeof(stat->stat_id)); + stat->stat_val = 0; + + /* Fetch the fuel gauge and populate it in payload */ + rc = drc_pmem_query_stats(p, stats, 1); + if (rc < 0) { + dev_dbg(&p->pdev->dev, "Err(%d) fetching fuel gauge\n", rc); + goto free_stats; + } + + statval = be64_to_cpu(stat->stat_val); + dev_dbg(&p->pdev->dev, + "Fetched fuel-gauge %llu", statval); + payload->health.extension_flags |= + PDSM_DIMM_HEALTH_RUN_GAUGE_VALID; + payload->health.dimm_fuel_gauge = statval; + + rc = sizeof(struct nd_papr_pdsm_health); + +free_stats: + kfree(stats); + return rc; +} + /* Fetch the DIMM health info and populate it in provided package. */ static int papr_pdsm_health(struct papr_scm_priv *p, union nd_pdsm_payload *payload) @@ -558,6 +603,10 @@ static int papr_pdsm_health(struct papr_scm_priv *p, /* struct populated hence can release the mutex now */ mutex_unlock(&p->health_mutex); + + /* Populate the fuel gauge meter in the payload */ + papr_pdsm_fuel_gauge(p, payload); + rc = sizeof(struct nd_papr_pdsm_health); out: -- cgit v1.2.3 From 8d8a629d00a5283874b81b594f31f8d436dc57d8 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 22 Jul 2020 12:24:22 +1000 Subject: powerpc/40x: Fix assembler warning about r0 The assembler says: arch/powerpc/kernel/head_40x.S:623: Warning: invalid register expression It's objecting to the use of r0 as the RA argument. That's because when RA = 0 the literal value 0 is used, rather than the content of r0, making the use of r0 in the source potentially confusing. Fix it to use a literal 0, the generated code is identical. 
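(Editorial aside, not from the patch: for readers unfamiliar with the RA = 0 convention, the same store can be sketched as GNU C inline assembly. The trailing "(0)" is the RA field; because RA = 0 means "literal zero", the effective address is just the 0xf0 displacement, whatever r0 happens to contain.)

/* Illustrative only: the equivalent store expressed from C. */
static inline void store_abatron_slot(unsigned int val)
{
	asm volatile("stw %0, 0xf0(0)" : : "r" (val) : "memory");
}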
Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200722022422.825197-1-mpe@ellerman.id.au --- arch/powerpc/kernel/head_40x.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index 926bfa73586a..5b282d9965a5 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -620,7 +620,7 @@ start_here: ori r6, r6, swapper_pg_dir@l lis r5, abatron_pteptrs@h ori r5, r5, abatron_pteptrs@l - stw r5, 0xf0(r0) /* Must match your Abatron config file */ + stw r5, 0xf0(0) /* Must match your Abatron config file */ tophys(r5,r5) stw r6, 0(r5) -- cgit v1.2.3 From 872d11bca9c29ed19595c993b9f552ffe9b63dcb Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Mon, 3 Aug 2020 12:07:19 +1000 Subject: selftests/powerpc: Skip vmx/vsx/tar/etc tests on older CPUs Some of our tests use VSX or newer VMX instructions, so need to be skipped on older CPUs to avoid SIGILL'ing. Similarly TAR was added in v2.07, and the PMU event used in the stcx fail test only works on Power8 or later. Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200803020719.96114-1-mpe@ellerman.id.au --- tools/testing/selftests/powerpc/math/Makefile | 8 ++++---- tools/testing/selftests/powerpc/math/vmx_preempt.c | 3 +++ tools/testing/selftests/powerpc/math/vmx_signal.c | 3 +++ tools/testing/selftests/powerpc/math/vmx_syscall.c | 7 ++++++- tools/testing/selftests/powerpc/math/vsx_preempt.c | 2 ++ tools/testing/selftests/powerpc/pmu/count_stcx_fail.c | 4 ++++ tools/testing/selftests/powerpc/ptrace/ptrace-tar.c | 3 +++ tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c | 2 ++ tools/testing/selftests/powerpc/stringloops/Makefile | 2 +- tools/testing/selftests/powerpc/stringloops/memcmp.c | 6 ++++++ 10 files changed, 34 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/powerpc/math/Makefile b/tools/testing/selftests/powerpc/math/Makefile index 4e2049d2fd8d..fcc91c205984 100644 --- a/tools/testing/selftests/powerpc/math/Makefile +++ b/tools/testing/selftests/powerpc/math/Makefile @@ -11,9 +11,9 @@ $(OUTPUT)/fpu_syscall: fpu_asm.S $(OUTPUT)/fpu_preempt: fpu_asm.S $(OUTPUT)/fpu_signal: fpu_asm.S -$(OUTPUT)/vmx_syscall: vmx_asm.S -$(OUTPUT)/vmx_preempt: vmx_asm.S -$(OUTPUT)/vmx_signal: vmx_asm.S +$(OUTPUT)/vmx_syscall: vmx_asm.S ../utils.c +$(OUTPUT)/vmx_preempt: vmx_asm.S ../utils.c +$(OUTPUT)/vmx_signal: vmx_asm.S ../utils.c $(OUTPUT)/vsx_preempt: CFLAGS += -mvsx -$(OUTPUT)/vsx_preempt: vsx_asm.S +$(OUTPUT)/vsx_preempt: vsx_asm.S ../utils.c diff --git a/tools/testing/selftests/powerpc/math/vmx_preempt.c b/tools/testing/selftests/powerpc/math/vmx_preempt.c index 2e059f154e77..6761d6ce30ec 100644 --- a/tools/testing/selftests/powerpc/math/vmx_preempt.c +++ b/tools/testing/selftests/powerpc/math/vmx_preempt.c @@ -57,6 +57,9 @@ int test_preempt_vmx(void) int i, rc, threads; pthread_t *tids; + // vcmpequd used in vmx_asm.S is v2.07 + SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07)); + threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR; tids = malloc(threads * sizeof(pthread_t)); FAIL_IF(!tids); diff --git a/tools/testing/selftests/powerpc/math/vmx_signal.c b/tools/testing/selftests/powerpc/math/vmx_signal.c index 785a48e0976f..b340a5c4e79d 100644 --- a/tools/testing/selftests/powerpc/math/vmx_signal.c +++ b/tools/testing/selftests/powerpc/math/vmx_signal.c @@ -96,6 +96,9 @@ int test_signal_vmx(void) void *rc_p; pthread_t *tids; + // vcmpequd used in vmx_asm.S is v2.07 + 
SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07)); + threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR; tids = malloc(threads * sizeof(pthread_t)); FAIL_IF(!tids); diff --git a/tools/testing/selftests/powerpc/math/vmx_syscall.c b/tools/testing/selftests/powerpc/math/vmx_syscall.c index 9ee293cc868e..03c78dfe3444 100644 --- a/tools/testing/selftests/powerpc/math/vmx_syscall.c +++ b/tools/testing/selftests/powerpc/math/vmx_syscall.c @@ -49,9 +49,14 @@ int test_vmx_syscall(void) * Setup an environment with much context switching */ pid_t pid2; - pid_t pid = fork(); + pid_t pid; int ret; int child_ret; + + // vcmpequd used in vmx_asm.S is v2.07 + SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07)); + + pid = fork(); FAIL_IF(pid == -1); pid2 = fork(); diff --git a/tools/testing/selftests/powerpc/math/vsx_preempt.c b/tools/testing/selftests/powerpc/math/vsx_preempt.c index 63de9c6e2cd3..d1601bb889d4 100644 --- a/tools/testing/selftests/powerpc/math/vsx_preempt.c +++ b/tools/testing/selftests/powerpc/math/vsx_preempt.c @@ -92,6 +92,8 @@ int test_preempt_vsx(void) int i, rc, threads; pthread_t *tids; + SKIP_IF(!have_hwcap(PPC_FEATURE_HAS_VSX)); + threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR; tids = malloc(threads * sizeof(pthread_t)); FAIL_IF(!tids); diff --git a/tools/testing/selftests/powerpc/pmu/count_stcx_fail.c b/tools/testing/selftests/powerpc/pmu/count_stcx_fail.c index 7b4ac4537702..2980abca31e0 100644 --- a/tools/testing/selftests/powerpc/pmu/count_stcx_fail.c +++ b/tools/testing/selftests/powerpc/pmu/count_stcx_fail.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "event.h" #include "utils.h" @@ -104,6 +105,9 @@ static int test_body(void) struct event events[3]; u64 overhead; + // The STCX_FAIL event we use works on Power8 or later + SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07)); + setup_event(&events[0], PERF_COUNT_HW_INSTRUCTIONS, PERF_TYPE_HARDWARE, "instructions"); setup_event(&events[1], PERF_COUNT_HW_CPU_CYCLES, PERF_TYPE_HARDWARE, "cycles"); setup_event(&events[2], PM_STCX_FAIL, PERF_TYPE_RAW, "stcx_fail"); diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c index 58cb1a860cc9..4436ca9d3caf 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c @@ -78,6 +78,9 @@ int ptrace_tar(void) pid_t pid; int ret, status; + // TAR was added in v2.07 + SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07)); + shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT); pid = fork(); if (pid < 0) { diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c index c4fe0e893306..cb9875f764ca 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c @@ -61,6 +61,8 @@ int ptrace_vsx(void) pid_t pid; int ret, status, i; + SKIP_IF(!have_hwcap(PPC_FEATURE_HAS_VSX)); + shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT); for (i = 0; i < VEC_MAX; i++) diff --git a/tools/testing/selftests/powerpc/stringloops/Makefile b/tools/testing/selftests/powerpc/stringloops/Makefile index 7fc0623d85c3..9c39f55a58ff 100644 --- a/tools/testing/selftests/powerpc/stringloops/Makefile +++ b/tools/testing/selftests/powerpc/stringloops/Makefile @@ -8,7 +8,7 @@ build_32bit = $(shell if ($(CC) $(CFLAGS) -m32 -o /dev/null memcmp.c >/dev/null TEST_GEN_PROGS := memcmp_64 strlen -$(OUTPUT)/memcmp_64: memcmp.c +$(OUTPUT)/memcmp_64: memcmp.c ../utils.c 
$(OUTPUT)/memcmp_64: CFLAGS += -m64 -maltivec ifeq ($(build_32bit),1) diff --git a/tools/testing/selftests/powerpc/stringloops/memcmp.c b/tools/testing/selftests/powerpc/stringloops/memcmp.c index e4605ca850dc..979df3d98368 100644 --- a/tools/testing/selftests/powerpc/stringloops/memcmp.c +++ b/tools/testing/selftests/powerpc/stringloops/memcmp.c @@ -4,6 +4,7 @@ #include #include #include +#include #include "utils.h" #define SIZE 256 @@ -151,6 +152,11 @@ static int testcase(bool islarge) static int testcases(void) { +#ifdef __powerpc64__ + // vcmpequd used in memcmp_64.S is v2.07 + SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07)); +#endif + testcase(0); testcase(1); return 0; -- cgit v1.2.3 From 2075ec9896c5aef01e837198381d04cfa6452317 Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Mon, 3 Aug 2020 17:54:08 +1000 Subject: powerpc/powernv/sriov: Fix use of uninitialised variable Initialising the value before using it is generally regarded as a good idea so do that. Fixes: 4c51f3e1e870 ("powerpc/powernv/sriov: Make single PE mode a per-BAR setting") Reported-by: Nathan Chancellor Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200803075408.132601-1-oohall@gmail.com --- arch/powerpc/platforms/powernv/pci-sriov.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c index 7894745fd4f8..c4434f20f42f 100644 --- a/arch/powerpc/platforms/powernv/pci-sriov.c +++ b/arch/powerpc/platforms/powernv/pci-sriov.c @@ -253,9 +253,9 @@ void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev) resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev, int resno) { + resource_size_t align = pci_iov_resource_size(pdev, resno); struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); struct pnv_iov_data *iov = pnv_iov_get(pdev); - resource_size_t align; /* * iov can be null if we have an SR-IOV device with IOV BAR that can't @@ -266,8 +266,6 @@ resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev, if (!iov) return align; - align = pci_iov_resource_size(pdev, resno); - /* * If we're using single mode then we can just use the native VF BAR * alignment. We validated that it's possible to use a single PE -- cgit v1.2.3 From 0c83b277ada72b585e6a3e52b067669df15bcedb Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 4 Aug 2020 22:44:06 +1000 Subject: powerpc: Fix circular dependency between percpu.h and mmu.h Recently random.h started including percpu.h (see commit f227e3ec3b5c ("random32: update the net random state on interrupt and activity")), which broke corenet64_smp_defconfig: In file included from /linux/arch/powerpc/include/asm/paca.h:18, from /linux/arch/powerpc/include/asm/percpu.h:13, from /linux/include/linux/random.h:14, from /linux/lib/uuid.c:14: /linux/arch/powerpc/include/asm/mmu.h:139:22: error: unknown type name 'next_tlbcam_idx' 139 | DECLARE_PER_CPU(int, next_tlbcam_idx); This is due to a circular header dependency: asm/mmu.h includes asm/percpu.h, which includes asm/paca.h, which includes asm/mmu.h Which means DECLARE_PER_CPU() isn't defined when mmu.h needs it. We can fix it by moving the include of paca.h below the include of asm-generic/percpu.h. This moves the include of paca.h out of the #ifdef __powerpc64__, but that is OK because paca.h is almost entirely inside #ifdef CONFIG_PPC64 anyway. 
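(Editorial aside: a minimal reproduction of the same include cycle, using hypothetical file names rather than the real kernel headers, shows why the ordering matters; the fix has the same shape as the patch, moving the nested include below the macro it ultimately needs.)

/* a.h -- stands in for asm/percpu.h */
#ifndef A_H
#define A_H
#include "b.h"				/* too early: pulls in the consumer */
#define DECLARE_THING(type, name)	extern type name
#endif

/* b.h -- stands in for asm/paca.h */
#ifndef B_H
#define B_H
#include "c.h"
#endif

/* c.h -- stands in for asm/mmu.h */
#ifndef C_H
#define C_H
#include "a.h"			/* no-op here, guard already defined */
DECLARE_THING(int, next_idx);	/* error: unknown type name 'next_idx' */
#endif

/* Moving the '#include "b.h"' in a.h below the DECLARE_THING()
 * definition breaks the cycle, just as the patch moves the paca.h
 * include below asm-generic/percpu.h. */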
It also moves the include of paca.h out of the #ifdef CONFIG_SMP, which could possibly break something, but seems to have no ill effects. Fixes: f227e3ec3b5c ("random32: update the net random state on interrupt and activity") Cc: stable@vger.kernel.org # v5.8 Reported-by: Stephen Rothwell Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200804130558.292328-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/percpu.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h index dce863a7635c..8e5b7d0b851c 100644 --- a/arch/powerpc/include/asm/percpu.h +++ b/arch/powerpc/include/asm/percpu.h @@ -10,8 +10,6 @@ #ifdef CONFIG_SMP -#include - #define __my_cpu_offset local_paca->data_offset #endif /* CONFIG_SMP */ @@ -19,4 +17,6 @@ #include +#include + #endif /* _ASM_POWERPC_PERCPU_H_ */ -- cgit v1.2.3 From a7aaa2f26bfd932a654706b19859e7adf802bee2 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Tue, 4 Aug 2020 23:01:37 +0530 Subject: selftests/powerpc: Fix pkey syscall redefinitions On distros using older glibc versions, the pkey tests encounter build failures due to redefinition of the pkey syscall numbers. For compatibility, commit 743f3544fffb added a wrapper for the gettid() syscall and included syscall.h if the version of glibc used is older than 2.30. This leads to different definitions of SYS_pkey_* as the ones in the pkey test header set numeric constants where as the ones from syscall.h reuse __NR_pkey_*. The compiler complains about redefinitions since they are different. This replaces SYS_pkey_* definitions with __NR_pkey_* such that the definitions in both syscall.h and pkeys.h are alike. This way, if syscall.h has to be included for compatibility reasons, builds will still succeed. Fixes: 743f3544fffb ("selftests/powerpc: Add wrapper for gettid") Reported-by: Sachin Sant Suggested-by: David Laight Suggested-by: Michael Ellerman Signed-off-by: Sandipan Das Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/a4956d838bf59b0a71a2553c5ca81131ea8b49b9.1596561758.git.sandipan@linux.ibm.com --- tools/testing/selftests/powerpc/include/pkeys.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/powerpc/include/pkeys.h b/tools/testing/selftests/powerpc/include/pkeys.h index 6ba95039a034..3312cb1b058d 100644 --- a/tools/testing/selftests/powerpc/include/pkeys.h +++ b/tools/testing/selftests/powerpc/include/pkeys.h @@ -31,9 +31,9 @@ #define SI_PKEY_OFFSET 0x20 -#define SYS_pkey_mprotect 386 -#define SYS_pkey_alloc 384 -#define SYS_pkey_free 385 +#define __NR_pkey_mprotect 386 +#define __NR_pkey_alloc 384 +#define __NR_pkey_free 385 #define PKEY_BITS_PER_PKEY 2 #define NR_PKEYS 32 @@ -62,17 +62,17 @@ void pkey_set_rights(int pkey, unsigned long rights) int sys_pkey_mprotect(void *addr, size_t len, int prot, int pkey) { - return syscall(SYS_pkey_mprotect, addr, len, prot, pkey); + return syscall(__NR_pkey_mprotect, addr, len, prot, pkey); } int sys_pkey_alloc(unsigned long flags, unsigned long rights) { - return syscall(SYS_pkey_alloc, flags, rights); + return syscall(__NR_pkey_alloc, flags, rights); } int sys_pkey_free(int pkey) { - return syscall(SYS_pkey_free, pkey); + return syscall(__NR_pkey_free, pkey); } int pkeys_unsupported(void) -- cgit v1.2.3
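(Editorial usage note, not part of the patch: with the numeric constants now mirroring the __NR_* names, a guarded definition lets the same header build whether or not the libc's <sys/syscall.h> already provides them. The #ifndef guard below is an extra precaution added for illustration, not something the patch itself introduces.)

#include <unistd.h>
#include <sys/syscall.h>

/* powerpc pkey syscall numbers; define them only if libc lacks them */
#ifndef __NR_pkey_alloc
#define __NR_pkey_alloc		384
#define __NR_pkey_free		385
#define __NR_pkey_mprotect	386
#endif

static inline long my_pkey_alloc(unsigned long flags, unsigned long rights)
{
	return syscall(__NR_pkey_alloc, flags, rights);
}

static inline long my_pkey_free(int pkey)
{
	return syscall(__NR_pkey_free, pkey);
}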