From bf47dc572aaf53c3f2311c71e03ec01be3131fd6 Mon Sep 17 00:00:00 2001
From: Sascha Silbe
Date: Mon, 4 Jul 2016 17:16:47 +0200
Subject: s390: clarify compressed image code path

The way the decompressor is hooked into the start-up code is rather
subtle, with a mix of multiply-defined symbols and hardcoded address
literals. Add some comments at the junction points to clarify how it
works.

Signed-off-by: Sascha Silbe
Signed-off-by: Martin Schwidefsky
---
 arch/s390/boot/compressed/head.S | 11 ++++++++---
 arch/s390/kernel/head.S          |  4 +++-
 2 files changed, 11 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/s390/boot/compressed/head.S b/arch/s390/boot/compressed/head.S
index f86a4eef28a9..28c4f96a2d9c 100644
--- a/arch/s390/boot/compressed/head.S
+++ b/arch/s390/boot/compressed/head.S
@@ -21,16 +21,21 @@ ENTRY(startup_continue)
 	lg	%r15,.Lstack-.LPG1(%r13)
 	aghi	%r15,-160
 	brasl	%r14,decompress_kernel
-	# setup registers for memory mover & branch to target
+	# Set up registers for memory mover. We move the decompressed image to
+	# 0x11000, starting at offset 0x11000 in the decompressed image so
+	# that code living at 0x11000 in the image will end up at 0x11000 in
+	# memory.
 	lgr	%r4,%r2
 	lg	%r2,.Loffset-.LPG1(%r13)
 	la	%r4,0(%r2,%r4)
 	lg	%r3,.Lmvsize-.LPG1(%r13)
 	lgr	%r5,%r3
-	# move the memory mover someplace safe
+	# Move the memory mover someplace safe so it doesn't overwrite itself.
 	la	%r1,0x200
 	mvc	0(mover_end-mover,%r1),mover-.LPG1(%r13)
-	# decompress image is started at 0x11000
+	# When the memory mover is done we pass control to
+	# arch/s390/kernel/head64.S:startup_continue which lives at 0x11000 in
+	# the decompressed image.
 	lgr	%r6,%r2
 	br	%r1
 mover:
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 56e4d8234ef2..4431905f8cfa 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -309,7 +309,9 @@ ENTRY(startup_kdump)
 	l	%r15,.Lstack-.LPG0(%r13)
 	ahi	%r15,-STACK_FRAME_OVERHEAD
 	brasl	%r14,verify_facilities
-	/* Continue with startup code in head64.S */
+# For uncompressed images, continue in
+# arch/s390/kernel/head64.S. For compressed images, continue in
+# arch/s390/boot/compressed/head.S.
 	jg	startup_continue
 
 .Lstack:
--
cgit v1.2.3

From 134a24cd895c5d138d639a0741ad27e389cd4562 Mon Sep 17 00:00:00 2001
From: Christian Borntraeger
Date: Wed, 3 Aug 2016 17:55:00 +0200
Subject: s390/crc32-vx: Fix checksum calculation for small sizes

The current prealign logic fails for sizes smaller than the alignment:
the new datalen passed to the vector function would be negative, and
being a size_t it instead wraps around to a huge number, causing memory
overruns and wrong data.

Let's add an early exit if the size is smaller than the minimal size
plus the worst-case prealignment.
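For illustration, the wrap-around can be reproduced with a minimal
user-space sketch (not the kernel code; the constants are assumed to
mirror VX_ALIGNMENT/VX_ALIGN_MASK in arch/s390/crypto/crc32-vx.c):

  #include <stdio.h>
  #include <stddef.h>

  #define VX_ALIGNMENT	16UL			/* assumed, as in crc32-vx.c */
  #define VX_ALIGN_MASK	(VX_ALIGNMENT - 1)

  int main(void)
  {
  	static char buf[32] __attribute__((aligned(16)));
  	const char *data = buf + 1;	/* deliberately misaligned */
  	size_t datalen = 7;		/* smaller than the alignment */

  	if ((unsigned long)data & VX_ALIGN_MASK) {
  		size_t prealign = VX_ALIGNMENT -
  				  ((unsigned long)data & VX_ALIGN_MASK);
  		/* prealign is 15 here, larger than datalen ... */
  		datalen -= prealign;
  	}
  	/* ... so the unsigned subtraction wraps to a huge value. */
  	printf("datalen after prealign: %zu\n", datalen);
  	return 0;
  }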
This also avoids calling the software fallback twice (once for the
prealign part and once for the remaining part) for all sizes smaller
than the minimum size.

Signed-off-by: Christian Borntraeger
Fixes: f848dbd3bc1a ("s390/crc32-vx: add crypto API module for optimized CRC-32 algorithms")
Reviewed-by: Hendrik Brueckner
Signed-off-by: Martin Schwidefsky
---
 arch/s390/crypto/crc32-vx.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch')

diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c
index 577ae1d4ae89..2bad9d837029 100644
--- a/arch/s390/crypto/crc32-vx.c
+++ b/arch/s390/crypto/crc32-vx.c
@@ -51,6 +51,9 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
 	struct kernel_fpu vxstate;				\
 	unsigned long prealign, aligned, remaining;		\
 								\
+	if (datalen < VX_MIN_LEN + VX_ALIGN_MASK)		\
+		return ___crc32_sw(crc, data, datalen);		\
+								\
 	if ((unsigned long)data & VX_ALIGN_MASK) {		\
 		prealign = VX_ALIGNMENT -			\
 			   ((unsigned long)data & VX_ALIGN_MASK); \
@@ -59,9 +62,6 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
 		data = (void *)((unsigned long)data + prealign); \
 	}							\
 								\
-	if (datalen < VX_MIN_LEN)				\
-		return ___crc32_sw(crc, data, datalen);		\
-								\
 	aligned = datalen & ~VX_ALIGN_MASK;			\
 	remaining = datalen & VX_ALIGN_MASK;			\
 								\
--
cgit v1.2.3

From e2efc424549eb66d227dfe6d073d1789a24bc698 Mon Sep 17 00:00:00 2001
From: Christian Borntraeger
Date: Thu, 4 Aug 2016 10:24:30 +0200
Subject: s390/lib: fix memcmp and strstr

If two strings compare equal, the clcle instruction will update the
string addresses to point _after_ the strings. That address might
already be on a different page, so we must not dereference these
pointers to calculate the difference; doing so can cause oopses.

The return value of memcmp does not need the difference; we can simply
reuse the condition code and return -1 for CC=1 (all bytes compared,
first operand low) and +1 for CC=2 (all bytes compared, first operand
high). strstr does not need the difference either.

While fixing this, make the common helper clcle "correct on its own" by
using l1 instead of l2 for the first length. strstr will call it with
l2 for both strings.
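As an illustrative sketch of the new return value mapping (plain C, not
the inline assembly from arch/s390/lib/string.c): CLCLE leaves condition
code 0 for "operands equal", 1 for "first operand low" and 2 for "first
operand high", which is all memcmp needs:

  /* Map the CLCLE condition code to a memcmp()-style result. */
  static int cc_to_memcmp_result(int cc)
  {
  	if (cc == 0)
  		return 0;		/* all bytes compared equal */
  	return cc == 1 ? -1 : 1;	/* first operand low / high */
  }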
Signed-off-by: Christian Borntraeger
Fixes: db7f5eef3dc0 ("s390/lib: use basic blocks for inline assemblies")
Reviewed-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/lib/string.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

(limited to 'arch')

diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index e390bbb16443..48352bffbc92 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -237,11 +237,10 @@ char * strrchr(const char * s, int c)
 EXPORT_SYMBOL(strrchr);
 
 static inline int clcle(const char *s1, unsigned long l1,
-			const char *s2, unsigned long l2,
-			int *diff)
+			const char *s2, unsigned long l2)
 {
 	register unsigned long r2 asm("2") = (unsigned long) s1;
-	register unsigned long r3 asm("3") = (unsigned long) l2;
+	register unsigned long r3 asm("3") = (unsigned long) l1;
 	register unsigned long r4 asm("4") = (unsigned long) s2;
 	register unsigned long r5 asm("5") = (unsigned long) l2;
 	int cc;
@@ -252,7 +251,6 @@ static inline int clcle(const char *s1, unsigned long l1,
 		"	srl	%0,28"
 		: "=&d" (cc), "+a" (r2), "+a" (r3), "+a" (r4), "+a" (r5)
 		: : "cc");
-	*diff = *(char *)r2 - *(char *)r4;
 	return cc;
 }
 
@@ -270,9 +268,9 @@ char * strstr(const char * s1,const char * s2)
 		return (char *) s1;
 	l1 = __strend(s1) - s1;
 	while (l1-- >= l2) {
-		int cc, dummy;
+		int cc;
 
-		cc = clcle(s1, l1, s2, l2, &dummy);
+		cc = clcle(s1, l2, s2, l2);
 		if (!cc)
 			return (char *) s1;
 		s1++;
@@ -313,11 +311,11 @@ EXPORT_SYMBOL(memchr);
  */
 int memcmp(const void *cs, const void *ct, size_t n)
 {
-	int ret, diff;
+	int ret;
 
-	ret = clcle(cs, n, ct, n, &diff);
+	ret = clcle(cs, n, ct, n);
 	if (ret)
-		ret = diff;
+		ret = ret == 1 ? -1 : 1;
 	return ret;
 }
 EXPORT_SYMBOL(memcmp);
--
cgit v1.2.3

From 6e127efeeae5bbbc323e028d96c43dec0cc7b1b8 Mon Sep 17 00:00:00 2001
From: Christian Borntraeger
Date: Thu, 4 Aug 2016 12:51:09 +0200
Subject: s390/config: make the vector optimized crc function builtin

For all configs with CONFIG_BTRFS_FS=y we should also make the
optimized crc module builtin. Otherwise early mounts will fall back to
the software variant.
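For background, a hedged sketch of why being builtin matters
(illustrative only, not btrfs code): the crypto API binds a "crc32c"
transform to whatever implementation is registered at allocation time,
so a module-only vector implementation cannot be picked up by early
mounts:

  #include <linux/err.h>
  #include <linux/printk.h>
  #include <crypto/hash.h>

  /* Print which crc32c implementation a freshly allocated tfm is bound to. */
  static void crc32c_show_provider(void)
  {
  	struct crypto_shash *tfm = crypto_alloc_shash("crc32c", 0, 0);

  	if (IS_ERR(tfm))
  		return;
  	/* With CONFIG_CRYPTO_CRC32_S390=m this is typically the generic */
  	/* software driver during early boot; with =y the vector driver  */
  	/* can win the priority-based selection.                         */
  	pr_info("crc32c provider: %s\n",
  		crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm)));
  	crypto_free_shash(tfm);
  }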
Signed-off-by: Christian Borntraeger
Signed-off-by: Martin Schwidefsky
---
 arch/s390/configs/default_defconfig     | 2 +-
 arch/s390/configs/gcov_defconfig        | 2 +-
 arch/s390/configs/performance_defconfig | 2 +-
 arch/s390/defconfig                     | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 889ea3450210..26e0c7f08814 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -678,7 +678,7 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 1bcfd764910a..24879dab47bc 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -616,7 +616,7 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 13ff090139c8..a5c1e5f2a0ca 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -615,7 +615,7 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index ccccebeeaaf6..73610f2e3b4f 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -234,7 +234,7 @@ CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRC7=m
 # CONFIG_XZ_DEC_X86 is not set
 # CONFIG_XZ_DEC_POWERPC is not set
--
cgit v1.2.3

From 4d81aaa53c2dea220ddf88e19c33033d6cf4f8cb Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Tue, 9 Aug 2016 12:26:28 +0200
Subject: s390/pageattr: handle numpages parameter correctly

Both set_memory_ro() and set_memory_rw() will modify the page attributes
of at least one page, even if the numpages parameter is zero. The author
expected that calling these functions with numpages == zero would never
happen. However, with the new 444d13ff10fb ("modules: add ro_after_init
support") feature this happens frequently. Therefore do the right thing
and make these two functions return gracefully if nothing should be done.

Fixes crashes on module load like this one:

Unable to handle kernel pointer dereference in virtual kernel address space
Failing address: 000003ff80008000 TEID: 000003ff80008407
Fault in home space mode while using kernel ASCE.
AS:0000000000d18007 R3:00000001e6aa4007 S:00000001e6a10800 P:00000001e34ee21d
Oops: 0004 ilc:3 [#1] SMP
Modules linked in: x_tables
CPU: 10 PID: 1 Comm: systemd Not tainted 4.7.0-11895-g3fa9045 #4
Hardware name: IBM 2964 N96 703 (LPAR)
task: 00000001e9118000 task.stack: 00000001e9120000
Krnl PSW : 0704e00180000000 00000000005677f8 (rb_erase+0xf0/0x4d0)
           R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:0 AS:3 CC:2 PM:0 RI:0 EA:3
Krnl GPRS: 000003ff80008b20 000003ff80008b20 000003ff80008b70 0000000000b9d608
           000003ff80008b20 0000000000000000 00000001e9123e88 000003ff80008950
           00000001e485ab40 000003ff00000000 000003ff80008b00 00000001e4858480
           0000000100000000 000003ff80008b68 00000000001d5998 00000001e9123c28
Krnl Code: 00000000005677e8: ec1801c3007c	cgij	%r1,0,8,567b6e
           00000000005677ee: e32010100020	cg	%r2,16(%r1)
          #00000000005677f4: a78401c2		brc	8,567b78
          >00000000005677f8: e35010080024	stg	%r5,8(%r1)
           00000000005677fe: ec5801af007c	cgij	%r5,0,8,567b5c
           0000000000567804: e30050000024	stg	%r0,0(%r5)
           000000000056780a: ebacf0680004	lmg	%r10,%r12,104(%r15)
           0000000000567810: 07fe		bcr	15,%r14
Call Trace:
([<000003ff80008900>] __this_module+0x0/0xffffffffffffd700 [x_tables])
([<0000000000264fd4>] do_init_module+0x12c/0x220)
([<00000000001da14a>] load_module+0x24e2/0x2b10)
([<00000000001da976>] SyS_finit_module+0xbe/0xd8)
([<0000000000803b26>] system_call+0xd6/0x264)
Last Breaking-Event-Address:
 [<000000000056771a>] rb_erase+0x12/0x4d0
Kernel panic - not syncing: Fatal exception: panic_on_oops

Reported-by: Christian Borntraeger
Reported-and-tested-by: Sebastian Ott
Fixes: e8a97e42dc98 ("s390/pageattr: allow kernel page table splitting")
Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/mm/pageattr.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch')

diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 7104ffb5a67f..af7cf28cf97e 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -252,6 +252,8 @@ static int change_page_attr(unsigned long addr, unsigned long end,
 	int rc = -EINVAL;
 	pgd_t *pgdp;
 
+	if (addr == end)
+		return 0;
 	if (end >= MODULES_END)
 		return -EINVAL;
 	mutex_lock(&cpa_mutex);
--
cgit v1.2.3
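As an aside on the pageattr fix above, a hedged sketch (illustrative
only; the real code lives in arch/s390/mm/pageattr.c and its exact
signatures may differ) of how a numpages value of zero reaches the new
early exit:

  #include <asm/page.h>

  /*
   * set_memory_ro()/set_memory_rw() derive an end address from numpages,
   * so numpages == 0 yields end == addr and the call becomes a no-op
   * instead of changing the attributes of the page at addr.
   */
  static int change_attr_sketch(unsigned long addr, int numpages)
  {
  	unsigned long end = addr + (unsigned long)numpages * PAGE_SIZE;

  	if (addr == end)	/* numpages == 0: nothing to do */
  		return 0;
  	/* ... walk the page tables from addr to end and update them ... */
  	return 0;
  }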