author		Linus Torvalds <torvalds@linux-foundation.org>	2024-05-21 22:09:36 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-05-21 22:09:36 +0300
commit		2a8120d7b4827380f30b57788ff92ec5594e2194 (patch)
tree		27790b72bc2748fa28d4634edd83ae7a5cecd204 /arch/s390
parent		334e5639fc4e20752d46552abefc5311351e3bee (diff)
parent		c1248638f8c35b74400efa7e02e78ecda23373f9 (diff)
download	linux-2a8120d7b4827380f30b57788ff92ec5594e2194.tar.xz
Merge tag 's390-6.10-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull more s390 updates from Alexander Gordeev:

 - Switch read and write software bits for PUDs
 - Add missing hardware bits for PUDs and PMDs
 - Generate unwind information for C modules to fix GDB unwind error for vDSO functions
 - Create .build-id links for unstripped vDSO files to enable vDSO debugging with symbols
 - Use standard stack frame layout for vDSO generated stack frames to manually walk stack frames without DWARF information
 - Rework perf_callchain_user() and arch_stack_walk_user() functions to reduce code duplication
 - Skip first stack frame when walking user stack
 - Add basic checks to identify invalid instruction pointers when walking stack frames
 - Introduce and use struct stack_frame_vdso_wrapper within vDSO user wrapper code to automatically generate an asm-offset define. Also use STACK_FRAME_USER_OVERHEAD instead of STACK_FRAME_OVERHEAD to document that the code works with a user space stack
 - Clear the backchain of the extra stack frame added by the vDSO user wrapper code. This allows the user stack walker to detect and skip the non-standard stack frame. Without this an incorrect instruction pointer would be added to stack traces
 - Rewrite the psw_idle() function in C to ease maintenance and further enhancements
 - Remove the get_vtimer() function and use get_cpu_timer() instead
 - Mark the psw variable in __load_psw_mask() as __uninitialized to avoid superfluous clearing of the PSW
 - Remove an obsolete and superfluous comment about the removed TIF_FPU flag
 - Replace memzero_explicit() and kfree() with kfree_sensitive() to fix warnings reported by Coccinelle (a minimal sketch of this pattern follows the shortlog below)
 - Wipe sensitive data and all copies of protected- or secure-keys from the stack when an IOCTL fails
 - Both do_airq_interrupt() and do_io_interrupt() set the CIF_NOHZ_DELAY flag. Move it to do_io_irq() to simplify the code
 - Provide iucv_alloc_device() and iucv_release_device() helpers, which can be used to deduplicate more or less identical IUCV device allocation and release code in four different drivers
 - Make use of the iucv_alloc_device() and iucv_release_device() helpers to get rid of quite some code and also remove a cast to an incompatible function (clang W=1)
 - There is no user of iucv_root left outside of the core IUCV code, therefore remove the EXPORT_SYMBOL
 - __apply_alternatives() contains a runtime check which verifies that the size of the code area to be patched is even. Convert this to a compile time check
 - Increase the size of the buffers for sending z/VM CP DIAGNOSE X'008' commands from 128 to 240
 - Do not accept z/VM CP DIAGNOSE X'008' commands longer than the maximum allowed length
 - Use the correct defines IPL_BP_NVME_LEN and IPL_BP0_NVME_LEN instead of the IPL_BP_FCP_LEN and IPL_BP0_FCP_LEN ones to initialize the NVMe re-IPL block on 'scp_data' sysfs attribute update
 - Initialize the correct fields of the NVMe dump block, which were confused with FCP fields
 - Refactor the macros for the 'scp_data' (re-)IPL sysfs attribute to reduce code duplication
 - Introduce a 'scp_data' sysfs attribute for dump IPL to allow tools such as dumpconf to pass additional kernel command line parameters to a stand-alone dumper
 - Rework the CPACF query functions to use the correct RRE or RRF instruction formats and set instruction register fields correctly
 - Instead of calling BUG() at runtime, force a link error at build time when an unsupported opcode is used with the __cpacf_query() or __cpacf_check_opcode() functions
 - Fix a crash in the ap_parse_bitmap_str() function on /sys/bus/ap/apmask or /sys/bus/ap/aqmask sysfs file update with a relative mask value
 - Fix the "bindings complete" udev event, which should be sent once all AP devices have been bound to device drivers, and again when unbind/bind actions take place and all AP devices are bound again
 - The facility list alt_stfle_fac_list is not used anywhere in the decompressor, therefore remove it there
 - Remove the custom kprobes insn slot allocator in favour of the standard module_alloc() one, since kernel image and module areas are located within 4GB
 - Use kvcalloc() instead of kvmalloc_array() in the zcrypt driver to avoid calling memset() with a large byte count and get rid of the resulting sparse warning

* tag 's390-6.10-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (39 commits)
  s390/zcrypt: Use kvcalloc() instead of kvmalloc_array()
  s390/kprobes: Remove custom insn slot allocator
  s390/boot: Remove alt_stfle_fac_list from decompressor
  s390/ap: Fix bind complete udev event sent after each AP bus scan
  s390/ap: Fix crash in AP internal function modify_bitmap()
  s390/cpacf: Make use of invalid opcode produce a link error
  s390/cpacf: Split and rework cpacf query functions
  s390/ipl: Introduce sysfs attribute 'scp_data' for dump ipl
  s390/ipl: Introduce macros for (re)ipl sysfs attribute 'scp_data'
  s390/ipl: Fix incorrect initialization of nvme dump block
  s390/ipl: Fix incorrect initialization of len fields in nvme reipl block
  s390/ipl: Do not accept z/VM CP diag X'008' cmds longer than max length
  s390/ipl: Fix size of vmcmd buffers for sending z/VM CP diag X'008' cmds
  s390/alternatives: Convert runtime sanity check into compile time check
  s390/iucv: Unexport iucv_root
  tty: hvc-iucv: Make use of iucv_alloc_device()
  s390/smsgiucv_app: Make use of iucv_alloc_device()
  s390/netiucv: Make use of iucv_alloc_device()
  s390/vmlogrdr: Make use of iucv_alloc_device()
  s390/iucv: Provide iucv_alloc_device() / iucv_release_device()
  ...
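As a minimal sketch of the memzero_explicit()/kfree() cleanup mentioned in the list above (the helper and buffer names are hypothetical, not code from this series): kfree_sensitive() wipes the whole allocation before freeing it, so the two-step pattern collapses into a single call that the compiler cannot optimize away.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Old pattern flagged by Coccinelle: wipe explicitly, then free. */
static void drop_clear_key_old(u8 *clrkey, size_t keylen)
{
	memzero_explicit(clrkey, keylen);
	kfree(clrkey);
}

/* Equivalent replacement: zeroize and free in one call. */
static void drop_clear_key_new(u8 *clrkey)
{
	kfree_sensitive(clrkey);
}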
Diffstat (limited to 'arch/s390')
-rw-r--r--arch/s390/boot/startup.c1
-rw-r--r--arch/s390/include/asm/alternative-asm.h1
-rw-r--r--arch/s390/include/asm/alternative.h1
-rw-r--r--arch/s390/include/asm/cpacf.h109
-rw-r--r--arch/s390/include/asm/pgtable.h12
-rw-r--r--arch/s390/include/asm/processor.h17
-rw-r--r--arch/s390/include/asm/stacktrace.h12
-rw-r--r--arch/s390/kernel/Makefile1
-rw-r--r--arch/s390/kernel/alternative.c7
-rw-r--r--arch/s390/kernel/asm-offsets.c9
-rw-r--r--arch/s390/kernel/entry.S23
-rw-r--r--arch/s390/kernel/idle.c10
-rw-r--r--arch/s390/kernel/ipl.c319
-rw-r--r--arch/s390/kernel/irq.c1
-rw-r--r--arch/s390/kernel/kprobes.c67
-rw-r--r--arch/s390/kernel/kprobes.h9
-rw-r--r--arch/s390/kernel/kprobes_insn_page.S22
-rw-r--r--arch/s390/kernel/perf_event.c34
-rw-r--r--arch/s390/kernel/process.c5
-rw-r--r--arch/s390/kernel/setup.c2
-rw-r--r--arch/s390/kernel/stacktrace.c89
-rw-r--r--arch/s390/kernel/vdso.c13
-rw-r--r--arch/s390/kernel/vdso32/Makefile4
-rw-r--r--arch/s390/kernel/vdso64/Makefile4
-rw-r--r--arch/s390/kernel/vdso64/vdso_user_wrapper.S19
-rw-r--r--arch/s390/kernel/vtime.c10
26 files changed, 389 insertions, 412 deletions
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 467283b112cd..182aac6a0f77 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -32,7 +32,6 @@ unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata_preserved(max_mappable);
u64 __bootdata_preserved(stfle_fac_list[16]);
-u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);
struct machine_info machine;
diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h
index 7db046596b93..608f6287ca9c 100644
--- a/arch/s390/include/asm/alternative-asm.h
+++ b/arch/s390/include/asm/alternative-asm.h
@@ -15,6 +15,7 @@
.long \alt_start - .
.word \feature
.byte \orig_end - \orig_start
+ .org . - ( \orig_end - \orig_start ) & 1
.org . - ( \orig_end - \orig_start ) + ( \alt_end - \alt_start )
.org . - ( \alt_end - \alt_start ) + ( \orig_end - \orig_start )
.endm
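The hunk above, together with the matching change to the C-level ALTERNATIVE macro in alternative.h below, turns the old runtime length check into an assemble-time one: ".org . - (len) & 1" resolves to the current location when the patched range has even length, and to one byte before it when the length is odd, which the assembler rejects as moving the location counter backwards, so an odd-sized alternative now fails the build. A hedged C-level analogue of the same idea, expressed with the generic BUILD_BUG_ON() helper (illustrative only, not how the kernel implements it here):

#include <linux/build_bug.h>

/* Hypothetical helper: the length must be a compile-time constant. */
#define ASSERT_ALT_LEN_EVEN(len)	BUILD_BUG_ON((len) & 1)

static inline void record_alternative(void)
{
	ASSERT_ALT_LEN_EVEN(6);		/* builds fine */
	/* ASSERT_ALT_LEN_EVEN(5);	   would break the build, like the .org trick */
}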
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
index 904dd049f954..dd93b92c3ab6 100644
--- a/arch/s390/include/asm/alternative.h
+++ b/arch/s390/include/asm/alternative.h
@@ -53,6 +53,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
"\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \
"\t.word " __stringify(facility) "\n" /* facility bit */ \
"\t.byte " oldinstr_len "\n" /* instruction len */ \
+ "\t.org . - (" oldinstr_len ") & 1\n" \
"\t.org . - (" oldinstr_len ") + (" altinstr_len(num) ")\n" \
"\t.org . - (" altinstr_len(num) ") + (" oldinstr_len ")\n"
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index b378e2b57ad8..c786538e397c 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -166,28 +166,86 @@
typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
-/**
- * cpacf_query() - check if a specific CPACF function is available
- * @opcode: the opcode of the crypto instruction
- * @func: the function code to test for
- *
- * Executes the query function for the given crypto instruction @opcode
- * and checks if @func is available
- *
- * Returns 1 if @func is available for @opcode, 0 otherwise
+/*
+ * Prototype for a not existing function to produce a link
+ * error if __cpacf_query() or __cpacf_check_opcode() is used
+ * with an invalid compile time const opcode.
*/
-static __always_inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
+void __cpacf_bad_opcode(void);
+
+static __always_inline void __cpacf_query_rre(u32 opc, u8 r1, u8 r2,
+ cpacf_mask_t *mask)
{
asm volatile(
- " lghi 0,0\n" /* query function */
- " lgr 1,%[mask]\n"
- " spm 0\n" /* pckmo doesn't change the cc */
- /* Parameter regs are ignored, but must be nonzero and unique */
- "0: .insn rrf,%[opc] << 16,2,4,6,0\n"
- " brc 1,0b\n" /* handle partial completion */
- : "=m" (*mask)
- : [mask] "d" ((unsigned long)mask), [opc] "i" (opcode)
- : "cc", "0", "1");
+ " la %%r1,%[mask]\n"
+ " xgr %%r0,%%r0\n"
+ " .insn rre,%[opc] << 16,%[r1],%[r2]\n"
+ : [mask] "=R" (*mask)
+ : [opc] "i" (opc),
+ [r1] "i" (r1), [r2] "i" (r2)
+ : "cc", "r0", "r1");
+}
+
+static __always_inline void __cpacf_query_rrf(u32 opc,
+ u8 r1, u8 r2, u8 r3, u8 m4,
+ cpacf_mask_t *mask)
+{
+ asm volatile(
+ " la %%r1,%[mask]\n"
+ " xgr %%r0,%%r0\n"
+ " .insn rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]\n"
+ : [mask] "=R" (*mask)
+ : [opc] "i" (opc), [r1] "i" (r1), [r2] "i" (r2),
+ [r3] "i" (r3), [m4] "i" (m4)
+ : "cc", "r0", "r1");
+}
+
+static __always_inline void __cpacf_query(unsigned int opcode,
+ cpacf_mask_t *mask)
+{
+ switch (opcode) {
+ case CPACF_KDSA:
+ __cpacf_query_rre(CPACF_KDSA, 0, 2, mask);
+ break;
+ case CPACF_KIMD:
+ __cpacf_query_rre(CPACF_KIMD, 0, 2, mask);
+ break;
+ case CPACF_KLMD:
+ __cpacf_query_rre(CPACF_KLMD, 0, 2, mask);
+ break;
+ case CPACF_KM:
+ __cpacf_query_rre(CPACF_KM, 2, 4, mask);
+ break;
+ case CPACF_KMA:
+ __cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, mask);
+ break;
+ case CPACF_KMAC:
+ __cpacf_query_rre(CPACF_KMAC, 0, 2, mask);
+ break;
+ case CPACF_KMC:
+ __cpacf_query_rre(CPACF_KMC, 2, 4, mask);
+ break;
+ case CPACF_KMCTR:
+ __cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, mask);
+ break;
+ case CPACF_KMF:
+ __cpacf_query_rre(CPACF_KMF, 2, 4, mask);
+ break;
+ case CPACF_KMO:
+ __cpacf_query_rre(CPACF_KMO, 2, 4, mask);
+ break;
+ case CPACF_PCC:
+ __cpacf_query_rre(CPACF_PCC, 0, 0, mask);
+ break;
+ case CPACF_PCKMO:
+ __cpacf_query_rre(CPACF_PCKMO, 0, 0, mask);
+ break;
+ case CPACF_PRNO:
+ __cpacf_query_rre(CPACF_PRNO, 2, 4, mask);
+ break;
+ default:
+ __cpacf_bad_opcode();
+ }
}
static __always_inline int __cpacf_check_opcode(unsigned int opcode)
@@ -211,10 +269,21 @@ static __always_inline int __cpacf_check_opcode(unsigned int opcode)
case CPACF_KMA:
return test_facility(146); /* check for MSA8 */
default:
- BUG();
+ __cpacf_bad_opcode();
+ return 0;
}
}
+/**
+ * cpacf_query() - check if a specific CPACF function is available
+ * @opcode: the opcode of the crypto instruction
+ * @func: the function code to test for
+ *
+ * Executes the query function for the given crypto instruction @opcode
+ * and checks if @func is available
+ *
+ * Returns 1 if @func is available for @opcode, 0 otherwise
+ */
static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
{
if (__cpacf_check_opcode(opcode)) {
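The reworked __cpacf_query() above relies on a common kernel trick: __cpacf_bad_opcode() is declared but never defined. Because the function is __always_inline and the opcode is a compile-time constant, all valid cases are folded down to a single query instruction and the default branch is discarded; an unsupported constant leaves the call in place and produces an undefined-reference error at link time instead of a runtime BUG(). A minimal standalone sketch of the technique, with hypothetical names:

#include <linux/compiler.h>

/* Intentionally declared but never defined anywhere. */
void bad_constant_width(void);

static __always_inline unsigned long width_mask(unsigned int bits)
{
	switch (bits) {
	case 8:
		return 0xffUL;
	case 16:
		return 0xffffUL;
	case 32:
		return 0xffffffffUL;
	default:
		/* Dead code for valid constants; otherwise the linker
		 * reports an undefined reference at build time. */
		bad_constant_width();
		return 0;
	}
}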
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index ad55b5b99a75..70b6ee557eb2 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -268,12 +268,14 @@ static inline int is_module_addr(void *addr)
#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
+#define _REGION3_ENTRY_HARDWARE_BITS 0xfffffffffffff6ffUL
+#define _REGION3_ENTRY_HARDWARE_BITS_LARGE 0xffffffff8001073cUL
#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_DIRTY 0x2000 /* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG 0x1000 /* SW region young bit */
#define _REGION3_ENTRY_LARGE 0x0400 /* RTTE-format control, large page */
-#define _REGION3_ENTRY_READ 0x0002 /* SW region read bit */
-#define _REGION3_ENTRY_WRITE 0x0001 /* SW region write bit */
+#define _REGION3_ENTRY_WRITE 0x0002 /* SW region write bit */
+#define _REGION3_ENTRY_READ 0x0001 /* SW region read bit */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
@@ -284,9 +286,9 @@ static inline int is_module_addr(void *addr)
#define _REGION_ENTRY_BITS 0xfffffffffffff22fUL
/* Bits in the segment table entry */
-#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
-#define _SEGMENT_ENTRY_HARDWARE_BITS 0xfffffffffffffe30UL
-#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE 0xfffffffffff00730UL
+#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe3fUL
+#define _SEGMENT_ENTRY_HARDWARE_BITS 0xfffffffffffffe3cUL
+#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE 0xfffffffffff1073cUL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* page table origin */
#define _SEGMENT_ENTRY_PROTECT 0x200 /* segment protection bit */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index db9982f0e8cd..07ad5a1df878 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -40,6 +40,7 @@
#include <asm/setup.h>
#include <asm/runtime_instr.h>
#include <asm/irqflags.h>
+#include <asm/alternative.h>
typedef long (*sys_call_ptr_t)(struct pt_regs *regs);
@@ -92,12 +93,21 @@ static inline void get_cpu_id(struct cpuid *ptr)
asm volatile("stidp %0" : "=Q" (*ptr));
}
+static __always_inline unsigned long get_cpu_timer(void)
+{
+ unsigned long timer;
+
+ asm volatile("stpt %[timer]" : [timer] "=Q" (timer));
+ return timer;
+}
+
void s390_adjust_jiffies(void);
void s390_update_cpu_mhz(void);
void cpu_detect_mhz_feature(void);
extern const struct seq_operations cpuinfo_op;
extern void execve_tail(void);
+unsigned long vdso_text_size(void);
unsigned long vdso_size(void);
/*
@@ -304,8 +314,8 @@ static inline void __load_psw(psw_t psw)
*/
static __always_inline void __load_psw_mask(unsigned long mask)
{
+ psw_t psw __uninitialized;
unsigned long addr;
- psw_t psw;
psw.mask = mask;
@@ -393,6 +403,11 @@ static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
return arch_irqs_disabled_flags(regs->psw.mask);
}
+static __always_inline void bpon(void)
+{
+ asm volatile(ALTERNATIVE("nop", ".insn rrf,0xb2e80000,0,0,13,0", 82));
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_PROCESSOR_H */
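A minimal usage sketch for the new get_cpu_timer() helper above (the wrapper is hypothetical; the pattern mirrors the vtime_delta() change at the end of this diff). The s390 CPU timer stored by STPT counts down, so elapsed time is the old value minus the new one:

#include <asm/processor.h>

/* Illustrative helper, not part of this series. */
static inline unsigned long cpu_timer_elapsed(unsigned long start)
{
	/* The CPU timer decrements, hence old minus new. */
	return start - get_cpu_timer();
}

/*
 * unsigned long t = get_cpu_timer();
 * ...do some work...
 * account_time(cpu_timer_elapsed(t));
 */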
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 433fde85b14e..85b6738b826a 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -2,6 +2,7 @@
#ifndef _ASM_S390_STACKTRACE_H
#define _ASM_S390_STACKTRACE_H
+#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
@@ -12,6 +13,17 @@ struct stack_frame_user {
unsigned long empty2[4];
};
+struct stack_frame_vdso_wrapper {
+ struct stack_frame_user sf;
+ unsigned long return_address;
+};
+
+struct perf_callchain_entry_ctx;
+
+void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
+ struct perf_callchain_entry_ctx *entry,
+ const struct pt_regs *regs, bool perf);
+
enum stack_type {
STACK_TYPE_UNKNOWN,
STACK_TYPE_TASK,
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index db2d9ba5a86d..7241fa194709 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -59,7 +59,6 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o
obj-$(CONFIG_COMPAT) += $(compat-obj-y)
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_KPROBES) += kprobes_insn_page.o
obj-$(CONFIG_KPROBES) += mcount.o
obj-$(CONFIG_RETHOOK) += rethook.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
index e7bca29f9c34..1ac5f707dd70 100644
--- a/arch/s390/kernel/alternative.c
+++ b/arch/s390/kernel/alternative.c
@@ -33,13 +33,6 @@ static void __init_or_module __apply_alternatives(struct alt_instr *start,
if (!__test_facility(a->facility, alt_stfle_fac_list))
continue;
-
- if (unlikely(a->instrlen % 2)) {
- WARN_ONCE(1, "cpu alternatives instructions length is "
- "odd, skipping patching\n");
- continue;
- }
-
s390_kernel_write(instr, replacement, a->instrlen);
}
}
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index fa5f6885c74a..f55979f64d49 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -13,7 +13,6 @@
#include <linux/purgatory.h>
#include <linux/pgtable.h>
#include <linux/ftrace.h>
-#include <asm/idle.h>
#include <asm/gmap.h>
#include <asm/stacktrace.h>
@@ -66,10 +65,10 @@ int main(void)
OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys);
DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
BLANK();
- /* idle data offsets */
- OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter);
- OFFSET(__TIMER_IDLE_ENTER, s390_idle_data, timer_idle_enter);
- OFFSET(__MT_CYCLES_ENTER, s390_idle_data, mt_cycles_enter);
+ OFFSET(__SFUSER_BACKCHAIN, stack_frame_user, back_chain);
+ DEFINE(STACK_FRAME_USER_OVERHEAD, sizeof(struct stack_frame_user));
+ OFFSET(__SFVDSO_RETURN_ADDRESS, stack_frame_vdso_wrapper, return_address);
+ DEFINE(STACK_FRAME_VDSO_OVERHEAD, sizeof(struct stack_frame_vdso_wrapper));
BLANK();
/* hardware defined lowcore locations 0x000 - 0x1ff */
OFFSET(__LC_EXT_PARAMS, lowcore, ext_params);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 6a1e0fbbaa15..60cf917a7122 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -441,29 +441,6 @@ INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
/*
- * Load idle PSW.
- */
-SYM_FUNC_START(psw_idle)
- stg %r14,(__SF_GPRS+8*8)(%r15)
- stg %r3,__SF_EMPTY(%r15)
- larl %r1,psw_idle_exit
- stg %r1,__SF_EMPTY+8(%r15)
- larl %r1,smp_cpu_mtid
- llgf %r1,0(%r1)
- ltgr %r1,%r1
- jz .Lpsw_idle_stcctm
- .insn rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
-.Lpsw_idle_stcctm:
- oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
- BPON
- stckf __CLOCK_IDLE_ENTER(%r2)
- stpt __TIMER_IDLE_ENTER(%r2)
- lpswe __SF_EMPTY(%r15)
-SYM_INNER_LABEL(psw_idle_exit, SYM_L_GLOBAL)
- BR_EX %r14
-SYM_FUNC_END(psw_idle)
-
-/*
* Machine check handler routines
*/
SYM_CODE_START(mcck_int_handler)
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index e7239aaf428b..af9c97c0ad73 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -57,9 +57,13 @@ void noinstr arch_cpu_idle(void)
psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT |
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
clear_cpu_flag(CIF_NOHZ_DELAY);
-
- /* psw_idle() returns with interrupts disabled. */
- psw_idle(idle, psw_mask);
+ set_cpu_flag(CIF_ENABLED_WAIT);
+ if (smp_cpu_mtid)
+ stcctm(MT_DIAG, smp_cpu_mtid, (u64 *)&idle->mt_cycles_enter);
+ idle->clock_idle_enter = get_tod_clock_fast();
+ idle->timer_idle_enter = get_cpu_timer();
+ bpon();
+ __load_psw_mask(psw_mask);
}
static ssize_t show_idle_count(struct device *dev,
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index f3609a0d453d..3a7d6e172211 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -267,7 +267,11 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t len) \
{ \
- strscpy(_value, buf, sizeof(_value)); \
+ if (len >= sizeof(_value)) \
+ return -E2BIG; \
+ len = strscpy(_value, buf, sizeof(_value)); \
+ if (len < 0) \
+ return len; \
strim(_value); \
return len; \
} \
@@ -276,6 +280,61 @@ static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
sys_##_prefix##_##_name##_show, \
sys_##_prefix##_##_name##_store)
+#define IPL_ATTR_SCP_DATA_SHOW_FN(_prefix, _ipl_block) \
+static ssize_t sys_##_prefix##_scp_data_show(struct file *filp, \
+ struct kobject *kobj, \
+ struct bin_attribute *attr, \
+ char *buf, loff_t off, \
+ size_t count) \
+{ \
+ size_t size = _ipl_block.scp_data_len; \
+ void *scp_data = _ipl_block.scp_data; \
+ \
+ return memory_read_from_buffer(buf, count, &off, \
+ scp_data, size); \
+}
+
+#define IPL_ATTR_SCP_DATA_STORE_FN(_prefix, _ipl_block_hdr, _ipl_block, _ipl_bp_len, _ipl_bp0_len)\
+static ssize_t sys_##_prefix##_scp_data_store(struct file *filp, \
+ struct kobject *kobj, \
+ struct bin_attribute *attr, \
+ char *buf, loff_t off, \
+ size_t count) \
+{ \
+ size_t scpdata_len = count; \
+ size_t padding; \
+ \
+ if (off) \
+ return -EINVAL; \
+ \
+ memcpy(_ipl_block.scp_data, buf, count); \
+ if (scpdata_len % 8) { \
+ padding = 8 - (scpdata_len % 8); \
+ memset(_ipl_block.scp_data + scpdata_len, \
+ 0, padding); \
+ scpdata_len += padding; \
+ } \
+ \
+ _ipl_block_hdr.len = _ipl_bp_len + scpdata_len; \
+ _ipl_block.len = _ipl_bp0_len + scpdata_len; \
+ _ipl_block.scp_data_len = scpdata_len; \
+ \
+ return count; \
+}
+
+#define DEFINE_IPL_ATTR_SCP_DATA_RO(_prefix, _ipl_block, _size) \
+IPL_ATTR_SCP_DATA_SHOW_FN(_prefix, _ipl_block) \
+static struct bin_attribute sys_##_prefix##_scp_data_attr = \
+ __BIN_ATTR(scp_data, 0444, sys_##_prefix##_scp_data_show, \
+ NULL, _size)
+
+#define DEFINE_IPL_ATTR_SCP_DATA_RW(_prefix, _ipl_block_hdr, _ipl_block, _ipl_bp_len, _ipl_bp0_len, _size)\
+IPL_ATTR_SCP_DATA_SHOW_FN(_prefix, _ipl_block) \
+IPL_ATTR_SCP_DATA_STORE_FN(_prefix, _ipl_block_hdr, _ipl_block, _ipl_bp_len, _ipl_bp0_len)\
+static struct bin_attribute sys_##_prefix##_scp_data_attr = \
+ __BIN_ATTR(scp_data, 0644, sys_##_prefix##_scp_data_show, \
+ sys_##_prefix##_scp_data_store, _size)
+
/*
* ipl section
*/
@@ -374,71 +433,38 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj,
static struct kobj_attribute sys_ipl_device_attr =
__ATTR(device, 0444, sys_ipl_device_show, NULL);
-static ssize_t ipl_parameter_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+static ssize_t sys_ipl_parameter_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
return memory_read_from_buffer(buf, count, &off, &ipl_block,
ipl_block.hdr.len);
}
-static struct bin_attribute ipl_parameter_attr =
- __BIN_ATTR(binary_parameter, 0444, ipl_parameter_read, NULL,
+static struct bin_attribute sys_ipl_parameter_attr =
+ __BIN_ATTR(binary_parameter, 0444, sys_ipl_parameter_read, NULL,
PAGE_SIZE);
-static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- unsigned int size = ipl_block.fcp.scp_data_len;
- void *scp_data = &ipl_block.fcp.scp_data;
-
- return memory_read_from_buffer(buf, count, &off, scp_data, size);
-}
-
-static ssize_t ipl_nvme_scp_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- unsigned int size = ipl_block.nvme.scp_data_len;
- void *scp_data = &ipl_block.nvme.scp_data;
-
- return memory_read_from_buffer(buf, count, &off, scp_data, size);
-}
-
-static ssize_t ipl_eckd_scp_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- unsigned int size = ipl_block.eckd.scp_data_len;
- void *scp_data = &ipl_block.eckd.scp_data;
-
- return memory_read_from_buffer(buf, count, &off, scp_data, size);
-}
-
-static struct bin_attribute ipl_scp_data_attr =
- __BIN_ATTR(scp_data, 0444, ipl_scp_data_read, NULL, PAGE_SIZE);
-
-static struct bin_attribute ipl_nvme_scp_data_attr =
- __BIN_ATTR(scp_data, 0444, ipl_nvme_scp_data_read, NULL, PAGE_SIZE);
-
-static struct bin_attribute ipl_eckd_scp_data_attr =
- __BIN_ATTR(scp_data, 0444, ipl_eckd_scp_data_read, NULL, PAGE_SIZE);
+DEFINE_IPL_ATTR_SCP_DATA_RO(ipl_fcp, ipl_block.fcp, PAGE_SIZE);
static struct bin_attribute *ipl_fcp_bin_attrs[] = {
- &ipl_parameter_attr,
- &ipl_scp_data_attr,
+ &sys_ipl_parameter_attr,
+ &sys_ipl_fcp_scp_data_attr,
NULL,
};
+DEFINE_IPL_ATTR_SCP_DATA_RO(ipl_nvme, ipl_block.nvme, PAGE_SIZE);
+
static struct bin_attribute *ipl_nvme_bin_attrs[] = {
- &ipl_parameter_attr,
- &ipl_nvme_scp_data_attr,
+ &sys_ipl_parameter_attr,
+ &sys_ipl_nvme_scp_data_attr,
NULL,
};
+DEFINE_IPL_ATTR_SCP_DATA_RO(ipl_eckd, ipl_block.eckd, PAGE_SIZE);
+
static struct bin_attribute *ipl_eckd_bin_attrs[] = {
- &ipl_parameter_attr,
- &ipl_eckd_scp_data_attr,
+ &sys_ipl_parameter_attr,
+ &sys_ipl_eckd_scp_data_attr,
NULL,
};
@@ -777,44 +803,10 @@ static struct kobj_attribute sys_reipl_ccw_vmparm_attr =
/* FCP reipl device attributes */
-static ssize_t reipl_fcp_scpdata_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- size_t size = reipl_block_fcp->fcp.scp_data_len;
- void *scp_data = reipl_block_fcp->fcp.scp_data;
-
- return memory_read_from_buffer(buf, count, &off, scp_data, size);
-}
-
-static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- size_t scpdata_len = count;
- size_t padding;
-
-
- if (off)
- return -EINVAL;
-
- memcpy(reipl_block_fcp->fcp.scp_data, buf, count);
- if (scpdata_len % 8) {
- padding = 8 - (scpdata_len % 8);
- memset(reipl_block_fcp->fcp.scp_data + scpdata_len,
- 0, padding);
- scpdata_len += padding;
- }
-
- reipl_block_fcp->hdr.len = IPL_BP_FCP_LEN + scpdata_len;
- reipl_block_fcp->fcp.len = IPL_BP0_FCP_LEN + scpdata_len;
- reipl_block_fcp->fcp.scp_data_len = scpdata_len;
-
- return count;
-}
-static struct bin_attribute sys_reipl_fcp_scp_data_attr =
- __BIN_ATTR(scp_data, 0644, reipl_fcp_scpdata_read,
- reipl_fcp_scpdata_write, DIAG308_SCPDATA_SIZE);
+DEFINE_IPL_ATTR_SCP_DATA_RW(reipl_fcp, reipl_block_fcp->hdr,
+ reipl_block_fcp->fcp,
+ IPL_BP_FCP_LEN, IPL_BP0_FCP_LEN,
+ DIAG308_SCPDATA_SIZE);
static struct bin_attribute *reipl_fcp_bin_attrs[] = {
&sys_reipl_fcp_scp_data_attr,
@@ -935,44 +927,10 @@ static struct kobj_attribute sys_reipl_fcp_clear_attr =
/* NVME reipl device attributes */
-static ssize_t reipl_nvme_scpdata_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- size_t size = reipl_block_nvme->nvme.scp_data_len;
- void *scp_data = reipl_block_nvme->nvme.scp_data;
-
- return memory_read_from_buffer(buf, count, &off, scp_data, size);
-}
-
-static ssize_t reipl_nvme_scpdata_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- size_t scpdata_len = count;
- size_t padding;
-
- if (off)
- return -EINVAL;
-
- memcpy(reipl_block_nvme->nvme.scp_data, buf, count);
- if (scpdata_len % 8) {
- padding = 8 - (scpdata_len % 8);
- memset(reipl_block_nvme->nvme.scp_data + scpdata_len,
- 0, padding);
- scpdata_len += padding;
- }
-
- reipl_block_nvme->hdr.len = IPL_BP_FCP_LEN + scpdata_len;
- reipl_block_nvme->nvme.len = IPL_BP0_FCP_LEN + scpdata_len;
- reipl_block_nvme->nvme.scp_data_len = scpdata_len;
-
- return count;
-}
-
-static struct bin_attribute sys_reipl_nvme_scp_data_attr =
- __BIN_ATTR(scp_data, 0644, reipl_nvme_scpdata_read,
- reipl_nvme_scpdata_write, DIAG308_SCPDATA_SIZE);
+DEFINE_IPL_ATTR_SCP_DATA_RW(reipl_nvme, reipl_block_nvme->hdr,
+ reipl_block_nvme->nvme,
+ IPL_BP_NVME_LEN, IPL_BP0_NVME_LEN,
+ DIAG308_SCPDATA_SIZE);
static struct bin_attribute *reipl_nvme_bin_attrs[] = {
&sys_reipl_nvme_scp_data_attr,
@@ -1068,44 +1026,10 @@ static struct attribute_group reipl_ccw_attr_group_lpar = {
/* ECKD reipl device attributes */
-static ssize_t reipl_eckd_scpdata_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- size_t size = reipl_block_eckd->eckd.scp_data_len;
- void *scp_data = reipl_block_eckd->eckd.scp_data;
-
- return memory_read_from_buffer(buf, count, &off, scp_data, size);
-}
-
-static ssize_t reipl_eckd_scpdata_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- size_t scpdata_len = count;
- size_t padding;
-
- if (off)
- return -EINVAL;
-
- memcpy(reipl_block_eckd->eckd.scp_data, buf, count);
- if (scpdata_len % 8) {
- padding = 8 - (scpdata_len % 8);
- memset(reipl_block_eckd->eckd.scp_data + scpdata_len,
- 0, padding);
- scpdata_len += padding;
- }
-
- reipl_block_eckd->hdr.len = IPL_BP_ECKD_LEN + scpdata_len;
- reipl_block_eckd->eckd.len = IPL_BP0_ECKD_LEN + scpdata_len;
- reipl_block_eckd->eckd.scp_data_len = scpdata_len;
-
- return count;
-}
-
-static struct bin_attribute sys_reipl_eckd_scp_data_attr =
- __BIN_ATTR(scp_data, 0644, reipl_eckd_scpdata_read,
- reipl_eckd_scpdata_write, DIAG308_SCPDATA_SIZE);
+DEFINE_IPL_ATTR_SCP_DATA_RW(reipl_eckd, reipl_block_eckd->hdr,
+ reipl_block_eckd->eckd,
+ IPL_BP_ECKD_LEN, IPL_BP0_ECKD_LEN,
+ DIAG308_SCPDATA_SIZE);
static struct bin_attribute *reipl_eckd_bin_attrs[] = {
&sys_reipl_eckd_scp_data_attr,
@@ -1649,6 +1573,11 @@ DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
dump_block_fcp->fcp.devno);
+DEFINE_IPL_ATTR_SCP_DATA_RW(dump_fcp, dump_block_fcp->hdr,
+ dump_block_fcp->fcp,
+ IPL_BP_FCP_LEN, IPL_BP0_FCP_LEN,
+ DIAG308_SCPDATA_SIZE);
+
static struct attribute *dump_fcp_attrs[] = {
&sys_dump_fcp_device_attr.attr,
&sys_dump_fcp_wwpn_attr.attr,
@@ -1658,9 +1587,15 @@ static struct attribute *dump_fcp_attrs[] = {
NULL,
};
+static struct bin_attribute *dump_fcp_bin_attrs[] = {
+ &sys_dump_fcp_scp_data_attr,
+ NULL,
+};
+
static struct attribute_group dump_fcp_attr_group = {
.name = IPL_FCP_STR,
.attrs = dump_fcp_attrs,
+ .bin_attrs = dump_fcp_bin_attrs,
};
/* NVME dump device attributes */
@@ -1673,6 +1608,11 @@ DEFINE_IPL_ATTR_RW(dump_nvme, bootprog, "%lld\n", "%llx\n",
DEFINE_IPL_ATTR_RW(dump_nvme, br_lba, "%lld\n", "%llx\n",
dump_block_nvme->nvme.br_lba);
+DEFINE_IPL_ATTR_SCP_DATA_RW(dump_nvme, dump_block_nvme->hdr,
+ dump_block_nvme->nvme,
+ IPL_BP_NVME_LEN, IPL_BP0_NVME_LEN,
+ DIAG308_SCPDATA_SIZE);
+
static struct attribute *dump_nvme_attrs[] = {
&sys_dump_nvme_fid_attr.attr,
&sys_dump_nvme_nsid_attr.attr,
@@ -1681,9 +1621,15 @@ static struct attribute *dump_nvme_attrs[] = {
NULL,
};
+static struct bin_attribute *dump_nvme_bin_attrs[] = {
+ &sys_dump_nvme_scp_data_attr,
+ NULL,
+};
+
static struct attribute_group dump_nvme_attr_group = {
.name = IPL_NVME_STR,
.attrs = dump_nvme_attrs,
+ .bin_attrs = dump_nvme_bin_attrs,
};
/* ECKD dump device attributes */
@@ -1697,6 +1643,11 @@ IPL_ATTR_BR_CHR_STORE_FN(dump, dump_block_eckd->eckd);
static struct kobj_attribute sys_dump_eckd_br_chr_attr =
__ATTR(br_chr, 0644, eckd_dump_br_chr_show, eckd_dump_br_chr_store);
+DEFINE_IPL_ATTR_SCP_DATA_RW(dump_eckd, dump_block_eckd->hdr,
+ dump_block_eckd->eckd,
+ IPL_BP_ECKD_LEN, IPL_BP0_ECKD_LEN,
+ DIAG308_SCPDATA_SIZE);
+
static struct attribute *dump_eckd_attrs[] = {
&sys_dump_eckd_device_attr.attr,
&sys_dump_eckd_bootprog_attr.attr,
@@ -1704,9 +1655,15 @@ static struct attribute *dump_eckd_attrs[] = {
NULL,
};
+static struct bin_attribute *dump_eckd_bin_attrs[] = {
+ &sys_dump_eckd_scp_data_attr,
+ NULL,
+};
+
static struct attribute_group dump_eckd_attr_group = {
.name = IPL_ECKD_STR,
.attrs = dump_eckd_attrs,
+ .bin_attrs = dump_eckd_bin_attrs,
};
/* CCW dump device attributes */
@@ -1859,9 +1816,9 @@ static int __init dump_nvme_init(void)
}
dump_block_nvme->hdr.len = IPL_BP_NVME_LEN;
dump_block_nvme->hdr.version = IPL_PARM_BLOCK_VERSION;
- dump_block_nvme->fcp.len = IPL_BP0_NVME_LEN;
- dump_block_nvme->fcp.pbt = IPL_PBT_NVME;
- dump_block_nvme->fcp.opt = IPL_PB0_NVME_OPT_DUMP;
+ dump_block_nvme->nvme.len = IPL_BP0_NVME_LEN;
+ dump_block_nvme->nvme.pbt = IPL_PBT_NVME;
+ dump_block_nvme->nvme.opt = IPL_PB0_NVME_OPT_DUMP;
dump_capabilities |= DUMP_TYPE_NVME;
return 0;
}
@@ -1959,11 +1916,13 @@ static struct shutdown_action __refdata dump_reipl_action = {
* vmcmd shutdown action: Trigger vm command on shutdown.
*/
-static char vmcmd_on_reboot[128];
-static char vmcmd_on_panic[128];
-static char vmcmd_on_halt[128];
-static char vmcmd_on_poff[128];
-static char vmcmd_on_restart[128];
+#define VMCMD_MAX_SIZE 240
+
+static char vmcmd_on_reboot[VMCMD_MAX_SIZE + 1];
+static char vmcmd_on_panic[VMCMD_MAX_SIZE + 1];
+static char vmcmd_on_halt[VMCMD_MAX_SIZE + 1];
+static char vmcmd_on_poff[VMCMD_MAX_SIZE + 1];
+static char vmcmd_on_restart[VMCMD_MAX_SIZE + 1];
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_reboot, "%s\n", "%s\n", vmcmd_on_reboot);
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_panic, "%s\n", "%s\n", vmcmd_on_panic);
@@ -2289,8 +2248,8 @@ static int __init vmcmd_on_reboot_setup(char *str)
{
if (!MACHINE_IS_VM)
return 1;
- strncpy_skip_quote(vmcmd_on_reboot, str, 127);
- vmcmd_on_reboot[127] = 0;
+ strncpy_skip_quote(vmcmd_on_reboot, str, VMCMD_MAX_SIZE);
+ vmcmd_on_reboot[VMCMD_MAX_SIZE] = 0;
on_reboot_trigger.action = &vmcmd_action;
return 1;
}
@@ -2300,8 +2259,8 @@ static int __init vmcmd_on_panic_setup(char *str)
{
if (!MACHINE_IS_VM)
return 1;
- strncpy_skip_quote(vmcmd_on_panic, str, 127);
- vmcmd_on_panic[127] = 0;
+ strncpy_skip_quote(vmcmd_on_panic, str, VMCMD_MAX_SIZE);
+ vmcmd_on_panic[VMCMD_MAX_SIZE] = 0;
on_panic_trigger.action = &vmcmd_action;
return 1;
}
@@ -2311,8 +2270,8 @@ static int __init vmcmd_on_halt_setup(char *str)
{
if (!MACHINE_IS_VM)
return 1;
- strncpy_skip_quote(vmcmd_on_halt, str, 127);
- vmcmd_on_halt[127] = 0;
+ strncpy_skip_quote(vmcmd_on_halt, str, VMCMD_MAX_SIZE);
+ vmcmd_on_halt[VMCMD_MAX_SIZE] = 0;
on_halt_trigger.action = &vmcmd_action;
return 1;
}
@@ -2322,8 +2281,8 @@ static int __init vmcmd_on_poff_setup(char *str)
{
if (!MACHINE_IS_VM)
return 1;
- strncpy_skip_quote(vmcmd_on_poff, str, 127);
- vmcmd_on_poff[127] = 0;
+ strncpy_skip_quote(vmcmd_on_poff, str, VMCMD_MAX_SIZE);
+ vmcmd_on_poff[VMCMD_MAX_SIZE] = 0;
on_poff_trigger.action = &vmcmd_action;
return 1;
}
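To make the macro-based deduplication above easier to follow, the DEFINE_IPL_ATTR_SCP_DATA_RW() instantiation for the FCP re-IPL case expands roughly as below (hand expansion, slightly reformatted; it matches the open-coded store callback removed earlier in this file, and the _show callback is generated analogously by IPL_ATTR_SCP_DATA_SHOW_FN()):

static ssize_t sys_reipl_fcp_scp_data_store(struct file *filp,
					    struct kobject *kobj,
					    struct bin_attribute *attr,
					    char *buf, loff_t off, size_t count)
{
	size_t scpdata_len = count;
	size_t padding;

	if (off)
		return -EINVAL;
	memcpy(reipl_block_fcp->fcp.scp_data, buf, count);
	if (scpdata_len % 8) {
		padding = 8 - (scpdata_len % 8);
		memset(reipl_block_fcp->fcp.scp_data + scpdata_len, 0, padding);
		scpdata_len += padding;
	}
	reipl_block_fcp->hdr.len = IPL_BP_FCP_LEN + scpdata_len;
	reipl_block_fcp->fcp.len = IPL_BP0_FCP_LEN + scpdata_len;
	reipl_block_fcp->fcp.scp_data_len = scpdata_len;
	return count;
}

static struct bin_attribute sys_reipl_fcp_scp_data_attr =
	__BIN_ATTR(scp_data, 0644, sys_reipl_fcp_scp_data_show,
		   sys_reipl_fcp_scp_data_store, DIAG308_SCPDATA_SIZE);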
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 259496fe0ef9..9acc6630abd3 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -151,6 +151,7 @@ void noinstr do_io_irq(struct pt_regs *regs)
if (from_idle)
account_idle_time_irq();
+ set_cpu_flag(CIF_NOHZ_DELAY);
do {
regs->tpi_info = S390_lowcore.tpi_info;
if (S390_lowcore.tpi_info.adapter_IO)
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 3c1b1be744de..05c83505e979 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -24,7 +24,6 @@
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/dis.h>
-#include "kprobes.h"
#include "entry.h"
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
@@ -32,8 +31,6 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
struct kretprobe_blackpoint kretprobe_blacklist[] = { };
-static int insn_page_in_use;
-
void *alloc_insn_page(void)
{
void *page;
@@ -45,26 +42,6 @@ void *alloc_insn_page(void)
return page;
}
-static void *alloc_s390_insn_page(void)
-{
- if (xchg(&insn_page_in_use, 1) == 1)
- return NULL;
- return &kprobes_insn_page;
-}
-
-static void free_s390_insn_page(void *page)
-{
- xchg(&insn_page_in_use, 0);
-}
-
-struct kprobe_insn_cache kprobe_s390_insn_slots = {
- .mutex = __MUTEX_INITIALIZER(kprobe_s390_insn_slots.mutex),
- .alloc = alloc_s390_insn_page,
- .free = free_s390_insn_page,
- .pages = LIST_HEAD_INIT(kprobe_s390_insn_slots.pages),
- .insn_size = MAX_INSN_SIZE,
-};
-
static void copy_instruction(struct kprobe *p)
{
kprobe_opcode_t insn[MAX_INSN_SIZE];
@@ -78,10 +55,10 @@ static void copy_instruction(struct kprobe *p)
if (probe_is_insn_relative_long(&insn[0])) {
/*
* For pc-relative instructions in RIL-b or RIL-c format patch
- * the RI2 displacement field. We have already made sure that
- * the insn slot for the patched instruction is within the same
- * 2GB area as the original instruction (either kernel image or
- * module area). Therefore the new displacement will always fit.
+ * the RI2 displacement field. The insn slot for the to be
+ * patched instruction is within the same 4GB area like the
+ * original instruction. Therefore the new displacement will
+ * always fit.
*/
disp = *(s32 *)&insn[1];
addr = (u64)(unsigned long)p->addr;
@@ -93,34 +70,6 @@ static void copy_instruction(struct kprobe *p)
}
NOKPROBE_SYMBOL(copy_instruction);
-static int s390_get_insn_slot(struct kprobe *p)
-{
- /*
- * Get an insn slot that is within the same 2GB area like the original
- * instruction. That way instructions with a 32bit signed displacement
- * field can be patched and executed within the insn slot.
- */
- p->ainsn.insn = NULL;
- if (is_kernel((unsigned long)p->addr))
- p->ainsn.insn = get_s390_insn_slot();
- else if (is_module_addr(p->addr))
- p->ainsn.insn = get_insn_slot();
- return p->ainsn.insn ? 0 : -ENOMEM;
-}
-NOKPROBE_SYMBOL(s390_get_insn_slot);
-
-static void s390_free_insn_slot(struct kprobe *p)
-{
- if (!p->ainsn.insn)
- return;
- if (is_kernel((unsigned long)p->addr))
- free_s390_insn_slot(p->ainsn.insn, 0);
- else
- free_insn_slot(p->ainsn.insn, 0);
- p->ainsn.insn = NULL;
-}
-NOKPROBE_SYMBOL(s390_free_insn_slot);
-
/* Check if paddr is at an instruction boundary */
static bool can_probe(unsigned long paddr)
{
@@ -174,7 +123,8 @@ int arch_prepare_kprobe(struct kprobe *p)
/* Make sure the probe isn't going on a difficult instruction */
if (probe_is_prohibited_opcode(p->addr))
return -EINVAL;
- if (s390_get_insn_slot(p))
+ p->ainsn.insn = get_insn_slot();
+ if (!p->ainsn.insn)
return -ENOMEM;
copy_instruction(p);
return 0;
@@ -216,7 +166,10 @@ NOKPROBE_SYMBOL(arch_disarm_kprobe);
void arch_remove_kprobe(struct kprobe *p)
{
- s390_free_insn_slot(p);
+ if (!p->ainsn.insn)
+ return;
+ free_insn_slot(p->ainsn.insn, 0);
+ p->ainsn.insn = NULL;
}
NOKPROBE_SYMBOL(arch_remove_kprobe);
diff --git a/arch/s390/kernel/kprobes.h b/arch/s390/kernel/kprobes.h
deleted file mode 100644
index dc3ed5098ee7..000000000000
--- a/arch/s390/kernel/kprobes.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-#ifndef _ARCH_S390_KPROBES_H
-#define _ARCH_S390_KPROBES_H
-
-#include <linux/kprobes.h>
-
-DEFINE_INSN_CACHE_OPS(s390_insn);
-
-#endif
diff --git a/arch/s390/kernel/kprobes_insn_page.S b/arch/s390/kernel/kprobes_insn_page.S
deleted file mode 100644
index 0fe4d725e98b..000000000000
--- a/arch/s390/kernel/kprobes_insn_page.S
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#include <linux/linkage.h>
-
-/*
- * insn_page is a special 4k aligned dummy function for kprobes.
- * It will contain all kprobed instructions that are out-of-line executed.
- * The page must be within the kernel image to guarantee that the
- * out-of-line instructions are within 2GB distance of their original
- * location. Using a dummy function ensures that the insn_page is within
- * the text section of the kernel and mapped read-only/executable from
- * the beginning on, thus avoiding to split large mappings if the page
- * would be in the data section instead.
- */
- .section .kprobes.text, "ax"
- .balign 4096
-SYM_CODE_START(kprobes_insn_page)
- .rept 2048
- .word 0x07fe
- .endr
-SYM_CODE_END(kprobes_insn_page)
- .previous
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index dfa77da2fd2e..5fff629b1a89 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -218,39 +218,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
- struct stack_frame_user __user *sf;
- unsigned long ip, sp;
- bool first = true;
-
- if (is_compat_task())
- return;
- perf_callchain_store(entry, instruction_pointer(regs));
- sf = (void __user *)user_stack_pointer(regs);
- pagefault_disable();
- while (entry->nr < entry->max_stack) {
- if (__get_user(sp, &sf->back_chain))
- break;
- if (__get_user(ip, &sf->gprs[8]))
- break;
- if (ip & 0x1) {
- /*
- * If the instruction address is invalid, and this
- * is the first stack frame, assume r14 has not
- * been written to the stack yet. Otherwise exit.
- */
- if (first && !(regs->gprs[14] & 0x1))
- ip = regs->gprs[14];
- else
- break;
- }
- perf_callchain_store(entry, ip);
- /* Sanity check: ABI requires SP to be aligned 8 bytes. */
- if (!sp || sp & 0x7)
- break;
- sf = (void __user *)sp;
- first = false;
- }
- pagefault_enable();
+ arch_stack_walk_user_common(NULL, NULL, entry, regs, true);
}
/* Perf definitions for PMU event attributes in sysfs */
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index dd456b475861..d8740631df4b 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -86,11 +86,6 @@ void arch_release_task_struct(struct task_struct *tsk)
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
- /*
- * Save the floating-point or vector register state of the current
- * task and set the TIF_FPU flag to lazy restore the FPU register
- * state when returning to user space.
- */
save_user_fpu_regs();
*dst = *src;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index cbd5290939df..90c2c786bb35 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -155,7 +155,7 @@ unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]);
EXPORT_SYMBOL(stfle_fac_list);
-u64 __bootdata_preserved(alt_stfle_fac_list[16]);
+u64 alt_stfle_fac_list[16];
struct oldmem_data __bootdata_preserved(oldmem_data);
unsigned long VMALLOC_START;
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 7c294da45bf5..640363b2a105 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -5,6 +5,7 @@
* Copyright IBM Corp. 2006
*/
+#include <linux/perf_event.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
@@ -62,46 +63,106 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
return 0;
}
-void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
- const struct pt_regs *regs)
+static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
+ struct perf_callchain_entry_ctx *entry, bool perf,
+ unsigned long ip)
+{
+#ifdef CONFIG_PERF_EVENTS
+ if (perf) {
+ if (perf_callchain_store(entry, ip))
+ return false;
+ return true;
+ }
+#endif
+ return consume_entry(cookie, ip);
+}
+
+static inline bool ip_invalid(unsigned long ip)
{
+ /*
+ * Perform some basic checks if an instruction address taken
+ * from unreliable source is invalid.
+ */
+ if (ip & 1)
+ return true;
+ if (ip < mmap_min_addr)
+ return true;
+ if (ip >= current->mm->context.asce_limit)
+ return true;
+ return false;
+}
+
+static inline bool ip_within_vdso(unsigned long ip)
+{
+ return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
+}
+
+void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
+ struct perf_callchain_entry_ctx *entry,
+ const struct pt_regs *regs, bool perf)
+{
+ struct stack_frame_vdso_wrapper __user *sf_vdso;
struct stack_frame_user __user *sf;
unsigned long ip, sp;
bool first = true;
if (is_compat_task())
return;
- if (!consume_entry(cookie, instruction_pointer(regs)))
+ if (!current->mm)
+ return;
+ ip = instruction_pointer(regs);
+ if (!store_ip(consume_entry, cookie, entry, perf, ip))
return;
sf = (void __user *)user_stack_pointer(regs);
pagefault_disable();
while (1) {
if (__get_user(sp, &sf->back_chain))
break;
- if (__get_user(ip, &sf->gprs[8]))
+ /*
+ * VDSO entry code has a non-standard stack frame layout.
+ * See VDSO user wrapper code for details.
+ */
+ if (!sp && ip_within_vdso(ip)) {
+ sf_vdso = (void __user *)sf;
+ if (__get_user(ip, &sf_vdso->return_address))
+ break;
+ sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
+ sf = (void __user *)sp;
+ if (__get_user(sp, &sf->back_chain))
+ break;
+ } else {
+ sf = (void __user *)sp;
+ if (__get_user(ip, &sf->gprs[8]))
+ break;
+ }
+ /* Sanity check: ABI requires SP to be 8 byte aligned. */
+ if (sp & 0x7)
break;
- if (ip & 0x1) {
+ if (ip_invalid(ip)) {
/*
* If the instruction address is invalid, and this
* is the first stack frame, assume r14 has not
* been written to the stack yet. Otherwise exit.
*/
- if (first && !(regs->gprs[14] & 0x1))
- ip = regs->gprs[14];
- else
+ if (!first)
+ break;
+ ip = regs->gprs[14];
+ if (ip_invalid(ip))
break;
}
- if (!consume_entry(cookie, ip))
- break;
- /* Sanity check: ABI requires SP to be aligned 8 bytes. */
- if (!sp || sp & 0x7)
- break;
- sf = (void __user *)sp;
+ if (!store_ip(consume_entry, cookie, entry, perf, ip))
+ return;
first = false;
}
pagefault_enable();
}
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs)
+{
+ arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
+}
+
unsigned long return_address(unsigned int n)
{
struct unwind_state state;
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index a45b3a4c91db..2f967ac2b8e3 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -210,17 +210,22 @@ static unsigned long vdso_addr(unsigned long start, unsigned long len)
return addr;
}
-unsigned long vdso_size(void)
+unsigned long vdso_text_size(void)
{
- unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;
+ unsigned long size;
if (is_compat_task())
- size += vdso32_end - vdso32_start;
+ size = vdso32_end - vdso32_start;
else
- size += vdso64_end - vdso64_start;
+ size = vdso64_end - vdso64_start;
return PAGE_ALIGN(size);
}
+unsigned long vdso_size(void)
+{
+ return vdso_text_size() + VVAR_NR_PAGES * PAGE_SIZE;
+}
+
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
unsigned long addr = VDSO_BASE;
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index df928fee26b5..2c5afb88d298 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -17,8 +17,10 @@ KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
KBUILD_AFLAGS_32 += -m31 -s
KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS_32 := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_32 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_32))
-KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin
+KBUILD_CFLAGS_32 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin -fasynchronous-unwind-tables
LDFLAGS_vdso32.so.dbg += -shared -soname=linux-vdso32.so.1 \
--hash-style=both --build-id=sha1 -melf_s390 -T
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index 6da1b9ad8ab0..ba19c0ca7c87 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -22,9 +22,11 @@ KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS))
KBUILD_AFLAGS_64 += -m64
KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS_64 := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS_64))
KBUILD_CFLAGS_64 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_64))
KBUILD_CFLAGS_64 := $(filter-out -munaligned-symbols,$(KBUILD_CFLAGS_64))
-KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin
+KBUILD_CFLAGS_64 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_64))
+KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin -fasynchronous-unwind-tables
ldflags-y := -shared -soname=linux-vdso64.so.1 \
--hash-style=both --build-id=sha1 -T
diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
index 85247ef5a41b..e26e68675c08 100644
--- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
@@ -6,8 +6,6 @@
#include <asm/dwarf.h>
#include <asm/ptrace.h>
-#define WRAPPER_FRAME_SIZE (STACK_FRAME_OVERHEAD+8)
-
/*
* Older glibc version called vdso without allocating a stackframe. This wrapper
* is just used to allocate a stackframe. See
@@ -20,16 +18,17 @@
__ALIGN
__kernel_\func:
CFI_STARTPROC
- aghi %r15,-WRAPPER_FRAME_SIZE
- CFI_DEF_CFA_OFFSET (STACK_FRAME_OVERHEAD + WRAPPER_FRAME_SIZE)
- CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
- stg %r14,STACK_FRAME_OVERHEAD(%r15)
- CFI_REL_OFFSET 14, STACK_FRAME_OVERHEAD
+ aghi %r15,-STACK_FRAME_VDSO_OVERHEAD
+ CFI_DEF_CFA_OFFSET (STACK_FRAME_USER_OVERHEAD + STACK_FRAME_VDSO_OVERHEAD)
+ CFI_VAL_OFFSET 15,-STACK_FRAME_USER_OVERHEAD
+ stg %r14,__SFVDSO_RETURN_ADDRESS(%r15)
+ CFI_REL_OFFSET 14,__SFVDSO_RETURN_ADDRESS
+ xc __SFUSER_BACKCHAIN(8,%r15),__SFUSER_BACKCHAIN(%r15)
brasl %r14,__s390_vdso_\func
- lg %r14,STACK_FRAME_OVERHEAD(%r15)
+ lg %r14,__SFVDSO_RETURN_ADDRESS(%r15)
CFI_RESTORE 14
- aghi %r15,WRAPPER_FRAME_SIZE
- CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
+ aghi %r15,STACK_FRAME_VDSO_OVERHEAD
+ CFI_DEF_CFA_OFFSET STACK_FRAME_USER_OVERHEAD
CFI_RESTORE 15
br %r14
CFI_ENDPROC
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 24a18e5ef6e8..ffc1db0cbf9c 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -33,14 +33,6 @@ static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_jiffies);
-static inline u64 get_vtimer(void)
-{
- u64 timer;
-
- asm volatile("stpt %0" : "=Q" (timer));
- return timer;
-}
-
static inline void set_vtimer(u64 expires)
{
u64 timer;
@@ -223,7 +215,7 @@ static u64 vtime_delta(void)
{
u64 timer = S390_lowcore.last_update_timer;
- S390_lowcore.last_update_timer = get_vtimer();
+ S390_lowcore.last_update_timer = get_cpu_timer();
return timer - S390_lowcore.last_update_timer;
}