Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig | 42
-rw-r--r--  arch/s390/Makefile | 12
-rw-r--r--  arch/s390/boot/compressed/misc.c | 3
-rw-r--r--  arch/s390/configs/default_defconfig | 2
-rw-r--r--  arch/s390/configs/gcov_defconfig | 1
-rw-r--r--  arch/s390/configs/performance_defconfig | 1
-rw-r--r--  arch/s390/configs/zfcpdump_defconfig | 1
-rw-r--r--  arch/s390/crypto/aes_s390.c | 4
-rw-r--r--  arch/s390/defconfig | 7
-rw-r--r--  arch/s390/hypfs/Makefile | 1
-rw-r--r--  arch/s390/hypfs/hypfs.h | 7
-rw-r--r--  arch/s390/hypfs/hypfs_dbfs.c | 49
-rw-r--r--  arch/s390/hypfs/hypfs_diag0c.c | 139
-rw-r--r--  arch/s390/hypfs/inode.c | 62
-rw-r--r--  arch/s390/include/asm/cpu_mf.h | 14
-rw-r--r--  arch/s390/include/asm/elf.h | 8
-rw-r--r--  arch/s390/include/asm/ftrace.h | 15
-rw-r--r--  arch/s390/include/asm/jump_label.h | 10
-rw-r--r--  arch/s390/include/asm/kvm_host.h | 102
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 2
-rw-r--r--  arch/s390/include/asm/page.h | 11
-rw-r--r--  arch/s390/include/asm/pci_io.h | 1
-rw-r--r--  arch/s390/include/asm/pgtable.h | 8
-rw-r--r--  arch/s390/include/asm/processor.h | 5
-rw-r--r--  arch/s390/include/asm/reset.h | 3
-rw-r--r--  arch/s390/include/asm/sclp.h | 11
-rw-r--r--  arch/s390/include/asm/setup.h | 3
-rw-r--r--  arch/s390/include/asm/sigp.h | 1
-rw-r--r--  arch/s390/include/asm/smp.h | 4
-rw-r--r--  arch/s390/include/asm/string.h | 1
-rw-r--r--  arch/s390/include/asm/sysinfo.h | 30
-rw-r--r--  arch/s390/include/asm/thread_info.h | 4
-rw-r--r--  arch/s390/include/asm/topology.h | 24
-rw-r--r--  arch/s390/include/uapi/asm/hypfs.h | 35
-rw-r--r--  arch/s390/include/uapi/asm/kvm.h | 41
-rw-r--r--  arch/s390/include/uapi/asm/sie.h | 4
-rw-r--r--  arch/s390/kernel/Makefile | 4
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 1
-rw-r--r--  arch/s390/kernel/base.S | 3
-rw-r--r--  arch/s390/kernel/cache.c | 390
-rw-r--r--  arch/s390/kernel/compat_signal.c | 2
-rw-r--r--  arch/s390/kernel/dis.c | 9
-rw-r--r--  arch/s390/kernel/early.c | 20
-rw-r--r--  arch/s390/kernel/entry.h | 4
-rw-r--r--  arch/s390/kernel/ftrace.c | 137
-rw-r--r--  arch/s390/kernel/head.S | 4
-rw-r--r--  arch/s390/kernel/ipl.c | 11
-rw-r--r--  arch/s390/kernel/jump_label.c | 67
-rw-r--r--  arch/s390/kernel/kprobes.c | 3
-rw-r--r--  arch/s390/kernel/machine_kexec.c | 19
-rw-r--r--  arch/s390/kernel/mcount.S | 2
-rw-r--r--  arch/s390/kernel/module.c | 3
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c | 7
-rw-r--r--  arch/s390/kernel/process.c | 18
-rw-r--r--  arch/s390/kernel/processor.c | 10
-rw-r--r--  arch/s390/kernel/sclp.S | 3
-rw-r--r--  arch/s390/kernel/setup.c | 4
-rw-r--r--  arch/s390/kernel/signal.c | 2
-rw-r--r--  arch/s390/kernel/smp.c | 315
-rw-r--r--  arch/s390/kernel/swsusp_asm64.S | 11
-rw-r--r--  arch/s390/kernel/sysinfo.c | 37
-rw-r--r--  arch/s390/kernel/time.c | 20
-rw-r--r--  arch/s390/kernel/topology.c | 181
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S | 6
-rw-r--r--  arch/s390/kernel/vtime.c | 58
-rw-r--r--  arch/s390/kvm/diag.c | 6
-rw-r--r--  arch/s390/kvm/gaccess.c | 300
-rw-r--r--  arch/s390/kvm/gaccess.h | 21
-rw-r--r--  arch/s390/kvm/guestdbg.c | 8
-rw-r--r--  arch/s390/kvm/intercept.c | 42
-rw-r--r--  arch/s390/kvm/interrupt.c | 1284
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 969
-rw-r--r--  arch/s390/kvm/kvm-s390.h | 69
-rw-r--r--  arch/s390/kvm/priv.c | 157
-rw-r--r--  arch/s390/kvm/sigp.c | 167
-rw-r--r--  arch/s390/kvm/trace-s390.h | 14
-rw-r--r--  arch/s390/lib/spinlock.c | 52
-rw-r--r--  arch/s390/mm/fault.c | 4
-rw-r--r--  arch/s390/mm/gup.c | 6
-rw-r--r--  arch/s390/mm/hugetlbpage.c | 20
-rw-r--r--  arch/s390/mm/init.c | 9
-rw-r--r--  arch/s390/mm/mmap.c | 145
-rw-r--r--  arch/s390/mm/pgtable.c | 6
-rw-r--r--  arch/s390/pci/pci.c | 62
-rw-r--r--  arch/s390/pci/pci_mmio.c | 21
85 files changed, 3807 insertions, 1574 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index eba9c1d0dab5..b2d7ec1669b4 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -66,6 +66,7 @@ config S390
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_SG_CHAIN
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK
select ARCH_INLINE_READ_LOCK_BH
@@ -116,7 +117,6 @@ config S390
select HAVE_BPF_JIT if 64BIT && PACK_STACK
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
- select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK
select HAVE_DYNAMIC_FTRACE if 64BIT
select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT
@@ -152,7 +152,6 @@ config S390
select TTY
select VIRT_CPU_ACCOUNTING
select VIRT_TO_BUS
- select ARCH_HAS_SG_CHAIN
config SCHED_OMIT_FRAME_POINTER
def_bool y
@@ -188,6 +187,10 @@ config HAVE_MARCH_ZEC12_FEATURES
def_bool n
select HAVE_MARCH_Z196_FEATURES
+config HAVE_MARCH_Z13_FEATURES
+ def_bool n
+ select HAVE_MARCH_ZEC12_FEATURES
+
choice
prompt "Processor type"
default MARCH_G5
@@ -247,6 +250,14 @@ config MARCH_ZEC12
2827 series). The kernel will be slightly faster but will not work on
older machines.
+config MARCH_Z13
+ bool "IBM z13"
+ select HAVE_MARCH_Z13_FEATURES if 64BIT
+ help
+ Select this to enable optimizations for IBM z13 (2964 series).
+ The kernel will be slightly faster but will not work on older
+ machines.
+
endchoice
config MARCH_G5_TUNE
@@ -270,6 +281,9 @@ config MARCH_Z196_TUNE
config MARCH_ZEC12_TUNE
def_bool TUNE_ZEC12 || MARCH_ZEC12 && TUNE_DEFAULT
+config MARCH_Z13_TUNE
+ def_bool TUNE_Z13 || MARCH_Z13 && TUNE_DEFAULT
+
choice
prompt "Tune code generation"
default TUNE_DEFAULT
@@ -308,6 +322,9 @@ config TUNE_Z196
config TUNE_ZEC12
bool "IBM zBC12 and zEC12"
+config TUNE_Z13
+ bool "IBM z13"
+
endchoice
config 64BIT
@@ -359,14 +376,14 @@ config SMP
Even if you don't know what to do here, say Y.
config NR_CPUS
- int "Maximum number of CPUs (2-256)"
- range 2 256
+ int "Maximum number of CPUs (2-512)"
+ range 2 512
depends on SMP
default "32" if !64BIT
default "64" if 64BIT
help
This allows you to specify the maximum number of CPUs which this
- kernel will support. The maximum supported value is 256 and the
+ kernel will support. The maximum supported value is 512 and the
minimum value which makes sense is 2.
This is purely to save memory - each supported CPU adds
@@ -381,17 +398,26 @@ config HOTPLUG_CPU
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
+config SCHED_SMT
+ def_bool n
+
config SCHED_MC
def_bool n
config SCHED_BOOK
+ def_bool n
+
+config SCHED_TOPOLOGY
def_bool y
- prompt "Book scheduler support"
+ prompt "Topology scheduler support"
depends on SMP
+ select SCHED_SMT
select SCHED_MC
+ select SCHED_BOOK
help
- Book scheduler support improves the CPU scheduler's decision making
- when dealing with machines that have several books.
+ Topology scheduler support improves the CPU scheduler's decision
+ making when dealing with machines that have multi-threading,
+ multiple cores or multiple books.
source kernel/Kconfig.preempt
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 878e67973151..acb6859c6a95 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -42,6 +42,7 @@ mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109
mflags-$(CONFIG_MARCH_Z10) := -march=z10
mflags-$(CONFIG_MARCH_Z196) := -march=z196
mflags-$(CONFIG_MARCH_ZEC12) := -march=zEC12
+mflags-$(CONFIG_MARCH_Z13) := -march=z13
aflags-y += $(mflags-y)
cflags-y += $(mflags-y)
@@ -53,6 +54,7 @@ cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109
cflags-$(CONFIG_MARCH_Z10_TUNE) += -mtune=z10
cflags-$(CONFIG_MARCH_Z196_TUNE) += -mtune=z196
cflags-$(CONFIG_MARCH_ZEC12_TUNE) += -mtune=zEC12
+cflags-$(CONFIG_MARCH_Z13_TUNE) += -mtune=z13
#KBUILD_IMAGE is necessary for make rpm
KBUILD_IMAGE :=arch/s390/boot/image
@@ -85,6 +87,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
endif
+ifdef CONFIG_FUNCTION_TRACER
+# make use of hotpatch feature if the compiler supports it
+cc_hotpatch := -mhotpatch=0,3
+ifeq ($(call cc-option-yn,$(cc_hotpatch)),y)
+CC_FLAGS_FTRACE := $(cc_hotpatch)
+KBUILD_AFLAGS += -DCC_USING_HOTPATCH
+KBUILD_CFLAGS += -DCC_USING_HOTPATCH
+endif
+endif
+
KBUILD_CFLAGS += -mbackchain -msoft-float $(cflags-y)
KBUILD_CFLAGS += -pipe -fno-strength-reduce -Wno-sign-compare
KBUILD_AFLAGS += $(aflags-y)
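
Note: the Makefile hunk above only probes the compiler for -mhotpatch support (via cc-option-yn) and defines CC_USING_HOTPATCH; it does not show what the option does to generated code. As a hedged illustration (not part of this patch, and assuming a recent GCC with s390 hotpatch support), the same effect can be requested per function with GCC's hotpatch attribute; with 0 halfwords before and 3 halfwords after the function label, the compiler leaves a 6-byte no-op at the entry point, which the ftrace changes in this series expect to be "brcl 0,0":

/* Illustrative only -- not from this patch. */
__attribute__((hotpatch(0, 3)))
void traced_function(void)
{
        /*
         * GCC emits a 6-byte "brcl 0,0" ahead of the first real
         * instruction; arch/s390/kernel/ftrace.c can later rewrite it
         * into a branch to the tracer (see the asm/ftrace.h hunk below).
         */
}
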
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 57cbaff1f397..42506b371b74 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -8,6 +8,7 @@
#include <asm/uaccess.h>
#include <asm/page.h>
+#include <asm/sclp.h>
#include <asm/ipl.h>
#include "sizes.h"
@@ -63,8 +64,6 @@ static unsigned long free_mem_end_ptr;
#include "../../../../lib/decompress_unxz.c"
#endif
-extern _sclp_print_early(const char *);
-
static int puts(const char *s)
{
_sclp_print_early(s);
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 9432d0f202ef..64707750c780 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -555,7 +555,6 @@ CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
CONFIG_SLUB_DEBUG_ON=y
CONFIG_SLUB_STATS=y
-CONFIG_DEBUG_KMEMLEAK=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_VM=y
CONFIG_DEBUG_VM_RB=y
@@ -563,6 +562,7 @@ CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
CONFIG_DEBUG_PER_CPU_MAPS=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
+CONFIG_PANIC_ON_OOPS=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 219dca6ea926..5c3097272cd8 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -540,6 +540,7 @@ CONFIG_UNUSED_SYMBOLS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
+CONFIG_PANIC_ON_OOPS=y
CONFIG_TIMER_STATS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 822c2f2e0c25..bda70f1ffd2c 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -537,6 +537,7 @@ CONFIG_FRAME_WARN=1024
CONFIG_UNUSED_SYMBOLS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
+CONFIG_PANIC_ON_OOPS=y
CONFIG_TIMER_STATS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 9d63051ebec4..1b0184a0f7f2 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -71,6 +71,7 @@ CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
+CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_FTRACE is not set
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 1f272b24fc0b..5566ce80abdb 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -134,7 +134,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
if (unlikely(need_fallback(sctx->key_len))) {
crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
@@ -159,7 +159,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
if (unlikely(need_fallback(sctx->key_len))) {
crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 785c5f24d6f9..83ef702d2403 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -14,7 +14,6 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_CGROUP_SCHED=y
@@ -22,12 +21,8 @@ CONFIG_RT_GROUP_SCHED=y
CONFIG_BLK_CGROUP=y
CONFIG_NAMESPACES=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
-CONFIG_RD_XZ=y
-CONFIG_RD_LZO=y
-CONFIG_RD_LZ4=y
CONFIG_EXPERT=y
+CONFIG_BPF_SYSCALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile
index 06f8d95a16cd..2ee25ba252d6 100644
--- a/arch/s390/hypfs/Makefile
+++ b/arch/s390/hypfs/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o
s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o hypfs_dbfs.o hypfs_sprp.o
+s390_hypfs-objs += hypfs_diag0c.o
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index b34b5ab90a31..eecde500ed49 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -37,6 +37,10 @@ extern int hypfs_vm_init(void);
extern void hypfs_vm_exit(void);
extern int hypfs_vm_create_files(struct dentry *root);
+/* VM diagnose 0c */
+int hypfs_diag0c_init(void);
+void hypfs_diag0c_exit(void);
+
/* Set Partition-Resource Parameter */
int hypfs_sprp_init(void);
void hypfs_sprp_exit(void);
@@ -49,7 +53,6 @@ struct hypfs_dbfs_data {
void *buf_free_ptr;
size_t size;
struct hypfs_dbfs_file *dbfs_file;
- struct kref kref;
};
struct hypfs_dbfs_file {
@@ -61,8 +64,6 @@ struct hypfs_dbfs_file {
unsigned long);
/* Private data for hypfs_dbfs.c */
- struct hypfs_dbfs_data *data;
- struct delayed_work data_free_work;
struct mutex lock;
struct dentry *dentry;
};
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
index 47fe1055c714..752f6df3e697 100644
--- a/arch/s390/hypfs/hypfs_dbfs.c
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -17,33 +17,16 @@ static struct hypfs_dbfs_data *hypfs_dbfs_data_alloc(struct hypfs_dbfs_file *f)
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
- kref_init(&data->kref);
data->dbfs_file = f;
return data;
}
-static void hypfs_dbfs_data_free(struct kref *kref)
+static void hypfs_dbfs_data_free(struct hypfs_dbfs_data *data)
{
- struct hypfs_dbfs_data *data;
-
- data = container_of(kref, struct hypfs_dbfs_data, kref);
data->dbfs_file->data_free(data->buf_free_ptr);
kfree(data);
}
-static void data_free_delayed(struct work_struct *work)
-{
- struct hypfs_dbfs_data *data;
- struct hypfs_dbfs_file *df;
-
- df = container_of(work, struct hypfs_dbfs_file, data_free_work.work);
- mutex_lock(&df->lock);
- data = df->data;
- df->data = NULL;
- mutex_unlock(&df->lock);
- kref_put(&data->kref, hypfs_dbfs_data_free);
-}
-
static ssize_t dbfs_read(struct file *file, char __user *buf,
size_t size, loff_t *ppos)
{
@@ -56,28 +39,21 @@ static ssize_t dbfs_read(struct file *file, char __user *buf,
df = file_inode(file)->i_private;
mutex_lock(&df->lock);
- if (!df->data) {
- data = hypfs_dbfs_data_alloc(df);
- if (!data) {
- mutex_unlock(&df->lock);
- return -ENOMEM;
- }
- rc = df->data_create(&data->buf, &data->buf_free_ptr,
- &data->size);
- if (rc) {
- mutex_unlock(&df->lock);
- kfree(data);
- return rc;
- }
- df->data = data;
- schedule_delayed_work(&df->data_free_work, HZ);
+ data = hypfs_dbfs_data_alloc(df);
+ if (!data) {
+ mutex_unlock(&df->lock);
+ return -ENOMEM;
+ }
+ rc = df->data_create(&data->buf, &data->buf_free_ptr, &data->size);
+ if (rc) {
+ mutex_unlock(&df->lock);
+ kfree(data);
+ return rc;
}
- data = df->data;
- kref_get(&data->kref);
mutex_unlock(&df->lock);
rc = simple_read_from_buffer(buf, size, ppos, data->buf, data->size);
- kref_put(&data->kref, hypfs_dbfs_data_free);
+ hypfs_dbfs_data_free(data);
return rc;
}
@@ -108,7 +84,6 @@ int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
if (IS_ERR(df->dentry))
return PTR_ERR(df->dentry);
mutex_init(&df->lock);
- INIT_DELAYED_WORK(&df->data_free_work, data_free_delayed);
return 0;
}
diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c
new file mode 100644
index 000000000000..d4c0d3717543
--- /dev/null
+++ b/arch/s390/hypfs/hypfs_diag0c.c
@@ -0,0 +1,139 @@
+/*
+ * Hypervisor filesystem for Linux on s390
+ *
+ * Diag 0C implementation
+ *
+ * Copyright IBM Corp. 2014
+ */
+
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <asm/hypfs.h>
+#include "hypfs.h"
+
+#define DBFS_D0C_HDR_VERSION 0
+
+/*
+ * Execute diagnose 0c in 31 bit mode
+ */
+static void diag0c(struct hypfs_diag0c_entry *entry)
+{
+ asm volatile (
+#ifdef CONFIG_64BIT
+ " sam31\n"
+ " diag %0,%0,0x0c\n"
+ " sam64\n"
+#else
+ " diag %0,%0,0x0c\n"
+#endif
+ : /* no output register */
+ : "a" (entry)
+ : "memory");
+}
+
+/*
+ * Get hypfs_diag0c_entry from CPU vector and store diag0c data
+ */
+static void diag0c_fn(void *data)
+{
+ diag0c(((void **) data)[smp_processor_id()]);
+}
+
+/*
+ * Allocate buffer and store diag 0c data
+ */
+static void *diag0c_store(unsigned int *count)
+{
+ struct hypfs_diag0c_data *diag0c_data;
+ unsigned int cpu_count, cpu, i;
+ void **cpu_vec;
+
+ get_online_cpus();
+ cpu_count = num_online_cpus();
+ cpu_vec = kmalloc(sizeof(*cpu_vec) * num_possible_cpus(), GFP_KERNEL);
+ if (!cpu_vec)
+ goto fail_put_online_cpus;
+ /* Note: Diag 0c needs 8 byte alignment and real storage */
+ diag0c_data = kzalloc(sizeof(struct hypfs_diag0c_hdr) +
+ cpu_count * sizeof(struct hypfs_diag0c_entry),
+ GFP_KERNEL | GFP_DMA);
+ if (!diag0c_data)
+ goto fail_kfree_cpu_vec;
+ i = 0;
+ /* Fill CPU vector for each online CPU */
+ for_each_online_cpu(cpu) {
+ diag0c_data->entry[i].cpu = cpu;
+ cpu_vec[cpu] = &diag0c_data->entry[i++];
+ }
+ /* Collect data all CPUs */
+ on_each_cpu(diag0c_fn, cpu_vec, 1);
+ *count = cpu_count;
+ kfree(cpu_vec);
+ put_online_cpus();
+ return diag0c_data;
+
+fail_kfree_cpu_vec:
+ kfree(cpu_vec);
+fail_put_online_cpus:
+ put_online_cpus();
+ return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Hypfs DBFS callback: Free diag 0c data
+ */
+static void dbfs_diag0c_free(const void *data)
+{
+ kfree(data);
+}
+
+/*
+ * Hypfs DBFS callback: Create diag 0c data
+ */
+static int dbfs_diag0c_create(void **data, void **data_free_ptr, size_t *size)
+{
+ struct hypfs_diag0c_data *diag0c_data;
+ unsigned int count;
+
+ diag0c_data = diag0c_store(&count);
+ if (IS_ERR(diag0c_data))
+ return PTR_ERR(diag0c_data);
+ memset(&diag0c_data->hdr, 0, sizeof(diag0c_data->hdr));
+ get_tod_clock_ext(diag0c_data->hdr.tod_ext);
+ diag0c_data->hdr.len = count * sizeof(struct hypfs_diag0c_entry);
+ diag0c_data->hdr.version = DBFS_D0C_HDR_VERSION;
+ diag0c_data->hdr.count = count;
+ *data = diag0c_data;
+ *data_free_ptr = diag0c_data;
+ *size = diag0c_data->hdr.len + sizeof(struct hypfs_diag0c_hdr);
+ return 0;
+}
+
+/*
+ * Hypfs DBFS file structure
+ */
+static struct hypfs_dbfs_file dbfs_file_0c = {
+ .name = "diag_0c",
+ .data_create = dbfs_diag0c_create,
+ .data_free = dbfs_diag0c_free,
+};
+
+/*
+ * Initialize diag 0c interface for z/VM
+ */
+int __init hypfs_diag0c_init(void)
+{
+ if (!MACHINE_IS_VM)
+ return 0;
+ return hypfs_dbfs_create_file(&dbfs_file_0c);
+}
+
+/*
+ * Shutdown diag 0c interface for z/VM
+ */
+void hypfs_diag0c_exit(void)
+{
+ if (!MACHINE_IS_VM)
+ return;
+ hypfs_dbfs_remove_file(&dbfs_file_0c);
+}
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index c952b981e4f2..99824ff8dd35 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -74,7 +74,7 @@ static void hypfs_remove(struct dentry *dentry)
parent = dentry->d_parent;
mutex_lock(&parent->d_inode->i_mutex);
if (hypfs_positive(dentry)) {
- if (S_ISDIR(dentry->d_inode->i_mode))
+ if (d_is_dir(dentry))
simple_rmdir(parent->d_inode, dentry);
else
simple_unlink(parent->d_inode, dentry);
@@ -144,36 +144,32 @@ static int hypfs_open(struct inode *inode, struct file *filp)
return nonseekable_open(inode, filp);
}
-static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t offset)
+static ssize_t hypfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- char *data;
- ssize_t ret;
- struct file *filp = iocb->ki_filp;
- /* XXX: temporary */
- char __user *buf = iov[0].iov_base;
- size_t count = iov[0].iov_len;
-
- if (nr_segs != 1)
- return -EINVAL;
-
- data = filp->private_data;
- ret = simple_read_from_buffer(buf, count, &offset, data, strlen(data));
- if (ret <= 0)
- return ret;
-
- iocb->ki_pos += ret;
- file_accessed(filp);
+ struct file *file = iocb->ki_filp;
+ char *data = file->private_data;
+ size_t available = strlen(data);
+ loff_t pos = iocb->ki_pos;
+ size_t count;
- return ret;
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= available || !iov_iter_count(to))
+ return 0;
+ count = copy_to_iter(data + pos, available - pos, to);
+ if (!count)
+ return -EFAULT;
+ iocb->ki_pos = pos + count;
+ file_accessed(file);
+ return count;
}
-static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t offset)
+
+static ssize_t hypfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
int rc;
struct super_block *sb = file_inode(iocb->ki_filp)->i_sb;
struct hypfs_sb_info *fs_info = sb->s_fs_info;
- size_t count = iov_length(iov, nr_segs);
+ size_t count = iov_iter_count(from);
/*
* Currently we only allow one update per second for two reasons:
@@ -202,6 +198,7 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
}
hypfs_update_update(sb);
rc = count;
+ iov_iter_advance(from, count);
out:
mutex_unlock(&fs_info->lock);
return rc;
@@ -440,10 +437,10 @@ struct dentry *hypfs_create_str(struct dentry *dir,
static const struct file_operations hypfs_file_ops = {
.open = hypfs_open,
.release = hypfs_release,
- .read = do_sync_read,
- .write = do_sync_write,
- .aio_read = hypfs_aio_read,
- .aio_write = hypfs_aio_write,
+ .read = new_sync_read,
+ .write = new_sync_write,
+ .read_iter = hypfs_read_iter,
+ .write_iter = hypfs_write_iter,
.llseek = no_llseek,
};
@@ -482,10 +479,14 @@ static int __init hypfs_init(void)
rc = -ENODATA;
goto fail_hypfs_vm_exit;
}
+ if (hypfs_diag0c_init()) {
+ rc = -ENODATA;
+ goto fail_hypfs_sprp_exit;
+ }
s390_kobj = kobject_create_and_add("s390", hypervisor_kobj);
if (!s390_kobj) {
rc = -ENOMEM;
- goto fail_hypfs_sprp_exit;
+ goto fail_hypfs_diag0c_exit;
}
rc = register_filesystem(&hypfs_type);
if (rc)
@@ -494,6 +495,8 @@ static int __init hypfs_init(void)
fail_filesystem:
kobject_put(s390_kobj);
+fail_hypfs_diag0c_exit:
+ hypfs_diag0c_exit();
fail_hypfs_sprp_exit:
hypfs_sprp_exit();
fail_hypfs_vm_exit:
@@ -510,6 +513,7 @@ static void __exit hypfs_exit(void)
{
unregister_filesystem(&hypfs_type);
kobject_put(s390_kobj);
+ hypfs_diag0c_exit();
hypfs_sprp_exit();
hypfs_vm_exit();
hypfs_diag_exit();
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index cb700d54bd83..5243a8679a1d 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -189,6 +189,20 @@ static inline int ecctr(u64 ctr, u64 *val)
return cc;
}
+/* Store CPU counter multiple for the MT utilization counter set */
+static inline int stcctm5(u64 num, u64 *val)
+{
+ typedef struct { u64 _[num]; } addrtype;
+ int cc;
+
+ asm volatile (
+ " .insn rsy,0xeb0000000017,%2,5,%1\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (cc), "=Q" (*(addrtype *) val) : "d" (num) : "cc");
+ return cc;
+}
+
/* Query sampling information */
static inline int qsi(struct hws_qsi_info_block *info)
{
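
Note: stcctm5() above wraps the STCCTM instruction for counter set 5, the MT-utilization set. A hedged usage sketch follows (not taken from this patch); it assumes smp_cpu_mtid, added to asm/smp.h later in this diff, holds the highest hardware thread id so that mtid + 1 counters cover one core, and the buffer size and pr_debug call are illustrative only:

/* Sketch only -- caller, buffer size and message are invented. */
static void show_mt_utilization(void)
{
        u64 mt_cycles[8];
        unsigned int n = smp_cpu_mtid + 1;

        if (n <= ARRAY_SIZE(mt_cycles) && stcctm5(n, mt_cycles) == 0)
                pr_debug("stored %u MT utilization counters, first=%llu\n",
                         n, (unsigned long long) mt_cycles[0]);
}
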
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index f6e43d39e3d8..c9c875d9ed31 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -163,8 +163,8 @@ extern unsigned int vdso_enabled;
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
-extern unsigned long randomize_et_dyn(unsigned long base);
-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
+extern unsigned long randomize_et_dyn(void);
+#define ELF_ET_DYN_BASE randomize_et_dyn()
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
@@ -209,7 +209,9 @@ do { \
} while (0)
#endif /* CONFIG_COMPAT */
-#define STACK_RND_MASK 0x7ffUL
+extern unsigned long mmap_rnd_mask;
+
+#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
#define ARCH_DLINFO \
do { \
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index abb618f1ead2..836c56290499 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -3,8 +3,12 @@
#define ARCH_SUPPORTS_FTRACE_OPS 1
+#ifdef CC_USING_HOTPATCH
+#define MCOUNT_INSN_SIZE 6
+#else
#define MCOUNT_INSN_SIZE 24
#define MCOUNT_RETURN_FIXUP 18
+#endif
#ifndef __ASSEMBLY__
@@ -37,18 +41,29 @@ struct ftrace_insn {
static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CC_USING_HOTPATCH
+ /* brcl 0,0 */
+ insn->opc = 0xc004;
+ insn->disp = 0;
+#else
/* jg .+24 */
insn->opc = 0xc0f4;
insn->disp = MCOUNT_INSN_SIZE / 2;
#endif
+#endif
}
static inline int is_ftrace_nop(struct ftrace_insn *insn)
{
#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CC_USING_HOTPATCH
+ if (insn->disp == 0)
+ return 1;
+#else
if (insn->disp == MCOUNT_INSN_SIZE / 2)
return 1;
#endif
+#endif
return 0;
}
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 346b1c85ffb4..2b77e235b5fb 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -1,9 +1,12 @@
#ifndef _ASM_S390_JUMP_LABEL_H
#define _ASM_S390_JUMP_LABEL_H
+#ifndef __ASSEMBLY__
+
#include <linux/types.h>
#define JUMP_LABEL_NOP_SIZE 6
+#define JUMP_LABEL_NOP_OFFSET 2
#ifdef CONFIG_64BIT
#define ASM_PTR ".quad"
@@ -13,9 +16,13 @@
#define ASM_ALIGN ".balign 4"
#endif
+/*
+ * We use a brcl 0,2 instruction for jump labels at compile time so it
+ * can be easily distinguished from a hotpatch generated instruction.
+ */
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm_volatile_goto("0: brcl 0,0\n"
+ asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
".pushsection __jump_table, \"aw\"\n"
ASM_ALIGN "\n"
ASM_PTR " 0b, %l[label], %0\n"
@@ -34,4 +41,5 @@ struct jump_entry {
jump_label_t key;
};
+#endif /* __ASSEMBLY__ */
#endif
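
Note: the comment added above relies on the two 6-byte NOP forms being byte-for-byte distinguishable. The asm/ftrace.h hunk earlier in this diff stores MCOUNT_INSN_SIZE / 2 as the displacement for "jg .+24", so the displacement field counts halfwords; by the same arithmetic, "brcl 0,2" carries a displacement of 1 while the hotpatch "brcl 0,0" carries 0. A hedged sketch of such a check (struct layout mirrors struct ftrace_insn from this diff, the helper name is made up):

/* Illustration only -- not from this patch. */
struct brcl_insn {
        u16 opc;        /* 0xc004 for "brcl 0,..." */
        s32 disp;       /* branch offset in halfwords */
} __packed;

static bool is_compile_time_jump_label_nop(struct brcl_insn *insn)
{
        /* brcl 0,2 -> disp 1; hotpatch brcl 0,0 -> disp 0 */
        return insn->opc == 0xc004 &&
               insn->disp == JUMP_LABEL_NOP_OFFSET / 2;
}
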
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 9cba74d5d853..d01fc588b5c3 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -35,11 +35,13 @@
#define KVM_NR_IRQCHIPS 1
#define KVM_IRQCHIP_NUM_PINS 4096
-#define SIGP_CTRL_C 0x00800000
+#define SIGP_CTRL_C 0x80
+#define SIGP_CTRL_SCN_MASK 0x3f
struct sca_entry {
- atomic_t ctrl;
- __u32 reserved;
+ __u8 reserved0;
+ __u8 sigp_ctrl;
+ __u16 reserved[3];
__u64 sda;
__u64 reserved2[2];
} __attribute__((packed));
@@ -87,7 +89,8 @@ struct kvm_s390_sie_block {
atomic_t cpuflags; /* 0x0000 */
__u32 : 1; /* 0x0004 */
__u32 prefix : 18;
- __u32 : 13;
+ __u32 : 1;
+ __u32 ibc : 12;
__u8 reserved08[4]; /* 0x0008 */
#define PROG_IN_SIE (1<<0)
__u32 prog0c; /* 0x000c */
@@ -132,7 +135,9 @@ struct kvm_s390_sie_block {
__u8 reserved60; /* 0x0060 */
__u8 ecb; /* 0x0061 */
__u8 ecb2; /* 0x0062 */
- __u8 reserved63[1]; /* 0x0063 */
+#define ECB3_AES 0x04
+#define ECB3_DEA 0x08
+ __u8 ecb3; /* 0x0063 */
__u32 scaol; /* 0x0064 */
__u8 reserved68[4]; /* 0x0068 */
__u32 todpr; /* 0x006c */
@@ -159,6 +164,7 @@ struct kvm_s390_sie_block {
__u64 tecmc; /* 0x00e8 */
__u8 reservedf0[12]; /* 0x00f0 */
#define CRYCB_FORMAT1 0x00000001
+#define CRYCB_FORMAT2 0x00000003
__u32 crycbd; /* 0x00fc */
__u64 gcr[16]; /* 0x0100 */
__u64 gbea; /* 0x0180 */
@@ -166,7 +172,9 @@ struct kvm_s390_sie_block {
__u32 fac; /* 0x01a0 */
__u8 reserved1a4[20]; /* 0x01a4 */
__u64 cbrlo; /* 0x01b8 */
- __u8 reserved1c0[30]; /* 0x01c0 */
+ __u8 reserved1c0[8]; /* 0x01c0 */
+ __u32 ecd; /* 0x01c8 */
+ __u8 reserved1cc[18]; /* 0x01cc */
__u64 pp; /* 0x01de */
__u8 reserved1e6[2]; /* 0x01e6 */
__u64 itdba; /* 0x01e8 */
@@ -177,11 +185,17 @@ struct kvm_s390_itdb {
__u8 data[256];
} __packed;
+struct kvm_s390_vregs {
+ __vector128 vrs[32];
+ __u8 reserved200[512]; /* for future vector expansion */
+} __packed;
+
struct sie_page {
struct kvm_s390_sie_block sie_block;
__u8 reserved200[1024]; /* 0x0200 */
struct kvm_s390_itdb itdb; /* 0x0600 */
- __u8 reserved700[2304]; /* 0x0700 */
+ __u8 reserved700[1280]; /* 0x0700 */
+ struct kvm_s390_vregs vregs; /* 0x0c00 */
} __packed;
struct kvm_vcpu_stat {
@@ -192,6 +206,7 @@ struct kvm_vcpu_stat {
u32 exit_stop_request;
u32 exit_validity;
u32 exit_instruction;
+ u32 halt_successful_poll;
u32 halt_wakeup;
u32 instruction_lctl;
u32 instruction_lctlg;
@@ -231,6 +246,7 @@ struct kvm_vcpu_stat {
u32 instruction_sigp_stop;
u32 instruction_sigp_stop_store_status;
u32 instruction_sigp_store_status;
+ u32 instruction_sigp_store_adtl_status;
u32 instruction_sigp_arch;
u32 instruction_sigp_prefix;
u32 instruction_sigp_restart;
@@ -263,6 +279,7 @@ struct kvm_vcpu_stat {
#define PGM_SPECIAL_OPERATION 0x13
#define PGM_OPERAND 0x15
#define PGM_TRACE_TABEL 0x16
+#define PGM_VECTOR_PROCESSING 0x1b
#define PGM_SPACE_SWITCH 0x1c
#define PGM_HFP_SQUARE_ROOT 0x1d
#define PGM_PC_TRANSLATION_SPEC 0x1f
@@ -327,6 +344,11 @@ enum irq_types {
IRQ_PEND_COUNT
};
+/* We have 2M for virtio device descriptor pages. Smallest amount of
+ * memory per page is 24 bytes (1 queue), so (2048*1024) / 24 = 87381
+ */
+#define KVM_S390_MAX_VIRTIO_IRQS 87381
+
/*
* Repressible (non-floating) machine check interrupts
* subclass bits in MCIC
@@ -378,14 +400,11 @@ struct kvm_s390_interrupt_info {
struct kvm_s390_emerg_info emerg;
struct kvm_s390_extcall_info extcall;
struct kvm_s390_prefix_info prefix;
+ struct kvm_s390_stop_info stop;
struct kvm_s390_mchk_info mchk;
};
};
-/* for local_interrupt.action_flags */
-#define ACTION_STORE_ON_STOP (1<<0)
-#define ACTION_STOP_ON_STOP (1<<1)
-
struct kvm_s390_irq_payload {
struct kvm_s390_io_info io;
struct kvm_s390_ext_info ext;
@@ -393,6 +412,7 @@ struct kvm_s390_irq_payload {
struct kvm_s390_emerg_info emerg;
struct kvm_s390_extcall_info extcall;
struct kvm_s390_prefix_info prefix;
+ struct kvm_s390_stop_info stop;
struct kvm_s390_mchk_info mchk;
};
@@ -401,19 +421,37 @@ struct kvm_s390_local_interrupt {
struct kvm_s390_float_interrupt *float_int;
wait_queue_head_t *wq;
atomic_t *cpuflags;
- unsigned int action_bits;
DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
struct kvm_s390_irq_payload irq;
unsigned long pending_irqs;
};
+#define FIRQ_LIST_IO_ISC_0 0
+#define FIRQ_LIST_IO_ISC_1 1
+#define FIRQ_LIST_IO_ISC_2 2
+#define FIRQ_LIST_IO_ISC_3 3
+#define FIRQ_LIST_IO_ISC_4 4
+#define FIRQ_LIST_IO_ISC_5 5
+#define FIRQ_LIST_IO_ISC_6 6
+#define FIRQ_LIST_IO_ISC_7 7
+#define FIRQ_LIST_PFAULT 8
+#define FIRQ_LIST_VIRTIO 9
+#define FIRQ_LIST_COUNT 10
+#define FIRQ_CNTR_IO 0
+#define FIRQ_CNTR_SERVICE 1
+#define FIRQ_CNTR_VIRTIO 2
+#define FIRQ_CNTR_PFAULT 3
+#define FIRQ_MAX_COUNT 4
+
struct kvm_s390_float_interrupt {
+ unsigned long pending_irqs;
spinlock_t lock;
- struct list_head list;
- atomic_t active;
+ struct list_head lists[FIRQ_LIST_COUNT];
+ int counters[FIRQ_MAX_COUNT];
+ struct kvm_s390_mchk_info mchk;
+ struct kvm_s390_ext_info srv_signal;
int next_rr_cpu;
unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
- unsigned int irq_count;
};
struct kvm_hw_wp_info_arch {
@@ -461,6 +499,7 @@ struct kvm_vcpu_arch {
s390_fp_regs host_fpregs;
unsigned int host_acrs[NUM_ACRS];
s390_fp_regs guest_fpregs;
+ struct kvm_s390_vregs *host_vregs;
struct kvm_s390_local_interrupt local_int;
struct hrtimer ckc_timer;
struct kvm_s390_pgm_info pgm;
@@ -470,7 +509,6 @@ struct kvm_vcpu_arch {
};
struct gmap *gmap;
struct kvm_guestdbg_info_arch guestdbg;
-#define KVM_S390_PFAULT_TOKEN_INVALID (-1UL)
unsigned long pfault_token;
unsigned long pfault_select;
unsigned long pfault_compare;
@@ -504,13 +542,39 @@ struct s390_io_adapter {
#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
#define MAX_S390_ADAPTER_MAPS 256
+/* maximum size of facilities and facility mask is 2k bytes */
+#define S390_ARCH_FAC_LIST_SIZE_BYTE (1<<11)
+#define S390_ARCH_FAC_LIST_SIZE_U64 \
+ (S390_ARCH_FAC_LIST_SIZE_BYTE / sizeof(u64))
+#define S390_ARCH_FAC_MASK_SIZE_BYTE S390_ARCH_FAC_LIST_SIZE_BYTE
+#define S390_ARCH_FAC_MASK_SIZE_U64 \
+ (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))
+
+struct kvm_s390_fac {
+ /* facility list requested by guest */
+ __u64 list[S390_ARCH_FAC_LIST_SIZE_U64];
+ /* facility mask supported by kvm & hosting machine */
+ __u64 mask[S390_ARCH_FAC_LIST_SIZE_U64];
+};
+
+struct kvm_s390_cpu_model {
+ struct kvm_s390_fac *fac;
+ struct cpuid cpu_id;
+ unsigned short ibc;
+};
+
struct kvm_s390_crypto {
struct kvm_s390_crypto_cb *crycb;
__u32 crycbd;
+ __u8 aes_kw;
+ __u8 dea_kw;
};
struct kvm_s390_crypto_cb {
- __u8 reserved00[128]; /* 0x0000 */
+ __u8 reserved00[72]; /* 0x0000 */
+ __u8 dea_wrapping_key_mask[24]; /* 0x0048 */
+ __u8 aes_wrapping_key_mask[32]; /* 0x0060 */
+ __u8 reserved80[128]; /* 0x0080 */
};
struct kvm_arch{
@@ -523,12 +587,16 @@ struct kvm_arch{
int use_irqchip;
int use_cmma;
int user_cpu_state_ctrl;
+ int user_sigp;
+ int user_stsi;
struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
wait_queue_head_t ipte_wq;
int ipte_lock_count;
struct mutex ipte_mutex;
spinlock_t start_stop_lock;
+ struct kvm_s390_cpu_model model;
struct kvm_s390_crypto crypto;
+ u64 epoch;
};
#define KVM_HVA_ERR_BAD (-1UL)
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index f49b71954654..8fb3802f8fad 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -62,6 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
int cpu = smp_processor_id();
+ S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
if (prev == next)
return;
if (MACHINE_HAS_TLB_LC)
@@ -73,7 +74,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
atomic_dec(&prev->context.attach_count);
if (MACHINE_HAS_TLB_LC)
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
- S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
}
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 7b2ac6e44166..53eacbd4f09b 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -37,16 +37,7 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end
#endif
}
-static inline void clear_page(void *page)
-{
- register unsigned long reg1 asm ("1") = 0;
- register void *reg2 asm ("2") = page;
- register unsigned long reg3 asm ("3") = 4096;
- asm volatile(
- " mvcl 2,0"
- : "+d" (reg2), "+d" (reg3) : "d" (reg1)
- : "memory", "cc");
-}
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
/*
* copy_page uses the mvcl instruction with 0xb0 padding byte in order to
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index f664e96f48c7..1a9a98de5bde 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -16,6 +16,7 @@
struct zpci_iomap_entry {
u32 fh;
u8 bar;
+ u16 count;
};
extern struct zpci_iomap_entry *zpci_iomap_start;
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index ffb1d8ce97ae..e08ec38f8c6e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -91,7 +91,9 @@ extern unsigned long zero_page_mask;
*/
#define PTRS_PER_PTE 256
#ifndef CONFIG_64BIT
+#define __PAGETABLE_PUD_FOLDED
#define PTRS_PER_PMD 1
+#define __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PUD 1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD 2048
@@ -99,7 +101,7 @@ extern unsigned long zero_page_mask;
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD 2048
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#define pte_ERROR(e) \
printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
@@ -1758,6 +1760,10 @@ extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);
+/* s390 has a private copy of get unmapped area to deal with cache synonyms */
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
/*
* No page table caches to initialise
*/
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index bed05ea7ec27..e7cbbdcdee13 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -215,10 +215,7 @@ static inline unsigned short stap(void)
/*
* Give up the time slice of the virtual PU.
*/
-static inline void cpu_relax(void)
-{
- barrier();
-}
+void cpu_relax(void);
#define cpu_relax_lowlatency() barrier()
diff --git a/arch/s390/include/asm/reset.h b/arch/s390/include/asm/reset.h
index 804578587a7a..72786067b300 100644
--- a/arch/s390/include/asm/reset.h
+++ b/arch/s390/include/asm/reset.h
@@ -15,5 +15,6 @@ struct reset_call {
extern void register_reset_call(struct reset_call *reset);
extern void unregister_reset_call(struct reset_call *reset);
-extern void s390_reset_system(void (*func)(void *), void *data);
+extern void s390_reset_system(void (*fn_pre)(void),
+ void (*fn_post)(void *), void *data);
#endif /* _ASM_S390_RESET_H */
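
Note: the prototype change above splits the single reset callback into two. The callers are not shown in this section, so the following is only a call-shape sketch with invented names; judging by the parameter names, fn_pre runs before the reset sequence and fn_post afterwards with the opaque data pointer, but that ordering is an assumption here:

/* Invented example callbacks -- call shape only, not from this patch. */
static void my_pre_reset(void)
{
        /* work that must happen before the reset, no arguments */
}

static void my_post_reset(void *data)
{
        /* work that continues after the reset, with the payload */
}

static void trigger_reset(void *payload)
{
        s390_reset_system(my_pre_reset, my_post_reset, payload);
}
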
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 1aba89b53cb9..f1096bab5199 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -27,11 +27,12 @@ struct sclp_ipl_info {
};
struct sclp_cpu_entry {
- u8 address;
+ u8 core_id;
u8 reserved0[2];
u8 : 3;
u8 siif : 1;
- u8 : 4;
+ u8 sigpif : 1;
+ u8 : 3;
u8 reserved2[10];
u8 type;
u8 reserved1;
@@ -51,6 +52,9 @@ int sclp_cpu_deconfigure(u8 cpu);
unsigned long long sclp_get_rnmax(void);
unsigned long long sclp_get_rzm(void);
unsigned int sclp_get_max_cpu(void);
+unsigned int sclp_get_mtid(u8 cpu_type);
+unsigned int sclp_get_mtid_max(void);
+unsigned int sclp_get_mtid_prev(void);
int sclp_sdias_blk_count(void);
int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
int sclp_chp_configure(struct chp_id chpid);
@@ -66,6 +70,9 @@ int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
unsigned long sclp_get_hsa_size(void);
void sclp_early_detect(void);
int sclp_has_siif(void);
+int sclp_has_sigpif(void);
unsigned int sclp_get_ibc(void);
+long _sclp_print_early(const char *);
+
#endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 7736fdd72595..b8d1e54b4733 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -57,6 +57,7 @@ extern void detect_memory_memblock(void);
#define MACHINE_FLAG_TE (1UL << 15)
#define MACHINE_FLAG_TLB_LC (1UL << 17)
#define MACHINE_FLAG_VX (1UL << 18)
+#define MACHINE_FLAG_CAD (1UL << 19)
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -80,6 +81,7 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_TE (0)
#define MACHINE_HAS_TLB_LC (0)
#define MACHINE_HAS_VX (0)
+#define MACHINE_HAS_CAD (0)
#else /* CONFIG_64BIT */
#define MACHINE_HAS_IEEE (1)
#define MACHINE_HAS_CSP (1)
@@ -93,6 +95,7 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
+#define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD)
#endif /* CONFIG_64BIT */
/*
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index fad4ae23ece0..ec60cf7fa0a2 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -16,6 +16,7 @@
#define SIGP_SET_ARCHITECTURE 18
#define SIGP_COND_EMERGENCY_SIGNAL 19
#define SIGP_SENSE_RUNNING 21
+#define SIGP_SET_MULTI_THREADING 22
#define SIGP_STORE_ADDITIONAL_STATUS 23
/* SIGP condition codes */
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 762d4f88af5a..b3bd0282dd98 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -16,6 +16,8 @@
#define raw_smp_processor_id() (S390_lowcore.cpu_nr)
extern struct mutex smp_cpu_state_mutex;
+extern unsigned int smp_cpu_mt_shift;
+extern unsigned int smp_cpu_mtid;
extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
@@ -35,6 +37,8 @@ extern void smp_fill_possible_mask(void);
#else /* CONFIG_SMP */
+#define smp_cpu_mtid 0
+
static inline void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
func(data);
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 7e2dcd7c57ef..8662f5c8e17f 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -44,7 +44,6 @@ extern char *strstr(const char *, const char *);
#undef __HAVE_ARCH_STRCHR
#undef __HAVE_ARCH_STRNCHR
#undef __HAVE_ARCH_STRNCMP
-#undef __HAVE_ARCH_STRNICMP
#undef __HAVE_ARCH_STRPBRK
#undef __HAVE_ARCH_STRSEP
#undef __HAVE_ARCH_STRSPN
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index f92428e459f8..f7054a892d9e 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -15,6 +15,7 @@
#define __ASM_S390_SYSINFO_H
#include <asm/bitsperlong.h>
+#include <linux/uuid.h>
struct sysinfo_1_1_1 {
unsigned char p:1;
@@ -90,7 +91,11 @@ struct sysinfo_2_2_2 {
unsigned short cpus_reserved;
char name[8];
unsigned int caf;
- char reserved_2[16];
+ char reserved_2[8];
+ unsigned char mt_installed;
+ unsigned char mt_general;
+ unsigned char mt_psmtid;
+ char reserved_3[5];
unsigned short cpus_dedicated;
unsigned short cpus_shared;
};
@@ -112,34 +117,39 @@ struct sysinfo_3_2_2 {
char name[8];
unsigned int caf;
char cpi[16];
- char reserved_1[24];
-
+ char reserved_1[3];
+ char ext_name_encoding;
+ unsigned int reserved_2;
+ uuid_be uuid;
} vm[8];
- char reserved_544[3552];
+ char reserved_3[1504];
+ char ext_names[8][256];
};
extern int topology_max_mnest;
-#define TOPOLOGY_CPU_BITS 64
+#define TOPOLOGY_CORE_BITS 64
#define TOPOLOGY_NR_MAG 6
-struct topology_cpu {
- unsigned char reserved0[4];
+struct topology_core {
+ unsigned char nl;
+ unsigned char reserved0[3];
unsigned char :6;
unsigned char pp:2;
unsigned char reserved1;
unsigned short origin;
- unsigned long mask[TOPOLOGY_CPU_BITS / BITS_PER_LONG];
+ unsigned long mask[TOPOLOGY_CORE_BITS / BITS_PER_LONG];
};
struct topology_container {
- unsigned char reserved[7];
+ unsigned char nl;
+ unsigned char reserved[6];
unsigned char id;
};
union topology_entry {
unsigned char nl;
- struct topology_cpu cpu;
+ struct topology_core cpu;
struct topology_container container;
};
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 4d62fd5b56e5..ef1df718642d 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -39,7 +39,6 @@ struct thread_info {
unsigned long sys_call_table; /* System call table address */
unsigned int cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
- struct restart_block restart_block;
unsigned int system_call;
__u64 user_timer;
__u64 system_timer;
@@ -56,9 +55,6 @@ struct thread_info {
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 56af53093d24..b1453a2ae1ca 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -9,20 +9,24 @@ struct cpu;
#ifdef CONFIG_SCHED_BOOK
struct cpu_topology_s390 {
+ unsigned short thread_id;
unsigned short core_id;
unsigned short socket_id;
unsigned short book_id;
+ cpumask_t thread_mask;
cpumask_t core_mask;
cpumask_t book_mask;
};
-extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
-#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
-#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
-#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
-#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
-#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
+#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
+#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
+#define topology_thread_cpumask(cpu) (&per_cpu(cpu_topology, cpu).thread_mask)
+#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
+#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
+#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
+#define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
#define mc_capable() 1
@@ -47,14 +51,6 @@ static inline void topology_expect_change(void) { }
#define POLARIZATION_VM (2)
#define POLARIZATION_VH (3)
-#ifdef CONFIG_SCHED_BOOK
-void s390_init_cpu_topology(void);
-#else
-static inline void s390_init_cpu_topology(void)
-{
-};
-#endif
-
#include <asm-generic/topology.h>
#endif /* _ASM_S390_TOPOLOGY_H */
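
Note: with the move from the cpu_topology[NR_CPUS] array to a per-CPU variable, the accessor macros keep their names but now resolve through per_cpu(), and a thread level is added. A small usage sketch (the pr_debug line is illustrative, not from this patch):

/* Sketch only -- uses the macros defined above. */
static void show_cpu_topology(void)
{
        int cpu = smp_processor_id();

        pr_debug("cpu %d: thread %d core %d socket %d book %d\n", cpu,
                 topology_thread_id(cpu), topology_core_id(cpu),
                 topology_physical_package_id(cpu), topology_book_id(cpu));
}
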
diff --git a/arch/s390/include/uapi/asm/hypfs.h b/arch/s390/include/uapi/asm/hypfs.h
index 37998b449531..b3fe12d8dd87 100644
--- a/arch/s390/include/uapi/asm/hypfs.h
+++ b/arch/s390/include/uapi/asm/hypfs.h
@@ -1,16 +1,19 @@
/*
- * IOCTL interface for hypfs
+ * Structures for hypfs interface
*
* Copyright IBM Corp. 2013
*
* Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
-#ifndef _ASM_HYPFS_CTL_H
-#define _ASM_HYPFS_CTL_H
+#ifndef _ASM_HYPFS_H
+#define _ASM_HYPFS_H
#include <linux/types.h>
+/*
+ * IOCTL for binary interface /sys/kernel/debug/diag_304
+ */
struct hypfs_diag304 {
__u32 args[2];
__u64 data;
@@ -22,4 +25,30 @@ struct hypfs_diag304 {
#define HYPFS_DIAG304 \
_IOWR(HYPFS_IOCTL_MAGIC, 0x20, struct hypfs_diag304)
+/*
+ * Structures for binary interface /sys/kernel/debug/diag_0c
+ */
+struct hypfs_diag0c_hdr {
+ __u64 len; /* Length of diag0c buffer without header */
+ __u16 version; /* Version of header */
+ char reserved1[6]; /* Reserved */
+ char tod_ext[16]; /* TOD clock for diag0c */
+ __u64 count; /* Number of entries (CPUs) in diag0c array */
+ char reserved2[24]; /* Reserved */
+};
+
+struct hypfs_diag0c_entry {
+ char date[8]; /* MM/DD/YY in EBCDIC */
+ char time[8]; /* HH:MM:SS in EBCDIC */
+ __u64 virtcpu; /* Virtual time consumed by the virt CPU (us) */
+ __u64 totalproc; /* Total of virtual and simulation time (us) */
+ __u32 cpu; /* Linux logical CPU number */
+ __u32 reserved; /* Align to 8 byte */
+};
+
+struct hypfs_diag0c_data {
+ struct hypfs_diag0c_hdr hdr; /* 64 byte header */
+ struct hypfs_diag0c_entry entry[]; /* diag0c entry array */
+};
+
#endif
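
Note: the new structures above describe the raw byte stream of the diag 0c debugfs file. A hedged userspace reader sketch follows (not part of this patch); the file path is taken from the comment in this header and may differ depending on where the hypfs debugfs directory is mounted, and the fixed entry limit is only there to keep the example short:

/* Illustrative reader -- path and MAX_ENTRIES are assumptions. */
#include <stdio.h>
#include <stdlib.h>
#include <asm/hypfs.h>

#define MAX_ENTRIES 512

int main(void)
{
        size_t bufsize = sizeof(struct hypfs_diag0c_hdr) +
                         MAX_ENTRIES * sizeof(struct hypfs_diag0c_entry);
        struct hypfs_diag0c_data *d = malloc(bufsize);
        FILE *f = fopen("/sys/kernel/debug/diag_0c", "rb");
        __u64 i;

        if (!d || !f)
                return 1;
        if (fread(d, 1, bufsize, f) < sizeof(d->hdr))
                return 1;
        for (i = 0; i < d->hdr.count && i < MAX_ENTRIES; i++)
                printf("cpu %u: virtcpu=%llu us totalproc=%llu us\n",
                       d->entry[i].cpu,
                       (unsigned long long) d->entry[i].virtcpu,
                       (unsigned long long) d->entry[i].totalproc);
        fclose(f);
        free(d);
        return 0;
}
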
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 48eda3ab4944..ef1a5fcc6c66 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -57,10 +57,44 @@ struct kvm_s390_io_adapter_req {
/* kvm attr_group on vm fd */
#define KVM_S390_VM_MEM_CTRL 0
+#define KVM_S390_VM_TOD 1
+#define KVM_S390_VM_CRYPTO 2
+#define KVM_S390_VM_CPU_MODEL 3
/* kvm attributes for mem_ctrl */
#define KVM_S390_VM_MEM_ENABLE_CMMA 0
#define KVM_S390_VM_MEM_CLR_CMMA 1
+#define KVM_S390_VM_MEM_LIMIT_SIZE 2
+
+/* kvm attributes for KVM_S390_VM_TOD */
+#define KVM_S390_VM_TOD_LOW 0
+#define KVM_S390_VM_TOD_HIGH 1
+
+/* kvm attributes for KVM_S390_VM_CPU_MODEL */
+/* processor related attributes are r/w */
+#define KVM_S390_VM_CPU_PROCESSOR 0
+struct kvm_s390_vm_cpu_processor {
+ __u64 cpuid;
+ __u16 ibc;
+ __u8 pad[6];
+ __u64 fac_list[256];
+};
+
+/* machine related attributes are r/o */
+#define KVM_S390_VM_CPU_MACHINE 1
+struct kvm_s390_vm_cpu_machine {
+ __u64 cpuid;
+ __u32 ibc;
+ __u8 pad[4];
+ __u64 fac_mask[256];
+ __u64 fac_list[256];
+};
+
+/* kvm attributes for crypto */
+#define KVM_S390_VM_CRYPTO_ENABLE_AES_KW 0
+#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1
+#define KVM_S390_VM_CRYPTO_DISABLE_AES_KW 2
+#define KVM_S390_VM_CRYPTO_DISABLE_DEA_KW 3
/* for KVM_GET_REGS and KVM_SET_REGS */
struct kvm_regs {
@@ -107,12 +141,16 @@ struct kvm_guest_debug_arch {
struct kvm_hw_breakpoint __user *hw_bp;
};
+/* for KVM_SYNC_PFAULT and KVM_REG_S390_PFTOKEN */
+#define KVM_S390_PFAULT_TOKEN_INVALID 0xffffffffffffffffULL
+
#define KVM_SYNC_PREFIX (1UL << 0)
#define KVM_SYNC_GPRS (1UL << 1)
#define KVM_SYNC_ACRS (1UL << 2)
#define KVM_SYNC_CRS (1UL << 3)
#define KVM_SYNC_ARCH0 (1UL << 4)
#define KVM_SYNC_PFAULT (1UL << 5)
+#define KVM_SYNC_VRS (1UL << 6)
/* definition of registers in kvm_run */
struct kvm_sync_regs {
__u64 prefix; /* prefix register */
@@ -127,6 +165,9 @@ struct kvm_sync_regs {
__u64 pft; /* pfault token [PFAULT] */
__u64 pfs; /* pfault select [PFAULT] */
__u64 pfc; /* pfault compare [PFAULT] */
+ __u64 vrs[32][2]; /* vector registers */
+ __u8 reserved[512]; /* for future vector expansion */
+ __u32 fpc; /* only valid with vector registers */
};
#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h
index d4096fdfc6ab..ee69c0854c88 100644
--- a/arch/s390/include/uapi/asm/sie.h
+++ b/arch/s390/include/uapi/asm/sie.h
@@ -230,7 +230,7 @@
* and returns a key, which can be used to find a mnemonic name
* of the instruction in the icpt_insn_codes table.
*/
-#define icpt_insn_decoder(insn) \
+#define icpt_insn_decoder(insn) ( \
INSN_DECODE_IPA0(0x01, insn, 48, 0xff) \
INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f) \
INSN_DECODE_IPA0(0xb2, insn, 48, 0xff) \
@@ -239,6 +239,6 @@
INSN_DECODE_IPA0(0xe5, insn, 48, 0xff) \
INSN_DECODE_IPA0(0xeb, insn, 16, 0xff) \
INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f) \
- INSN_DECODE(insn)
+ INSN_DECODE(insn))
#endif /* _UAPI_ASM_S390_SIE_H */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 204c43a4c245..31fab2676fe9 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -4,8 +4,8 @@
ifdef CONFIG_FUNCTION_TRACER
# Don't trace early setup code and tracing code
-CFLAGS_REMOVE_early.o = -pg
-CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
endif
#
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index e07e91605353..8dc4db10d160 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -171,6 +171,7 @@ int main(void)
#else /* CONFIG_32BIT */
DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code));
DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address));
+ DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr));
DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 797a823a2275..f74a53d339b0 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -97,7 +97,8 @@ ENTRY(diag308_reset)
lg %r4,0(%r4) # Save PSW
sturg %r4,%r3 # Use sturg, because of large pages
lghi %r1,1
- diag %r1,%r1,0x308
+ lghi %r0,0
+ diag %r0,%r1,0x308
.Lrestart_part2:
lhi %r0,0 # Load r0 with zero
lhi %r1,2 # Use mode 2 = ESAME (dump)
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index c0b03c28d157..0969d113b3d6 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -5,37 +5,11 @@
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
-#include <linux/notifier.h>
#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/slab.h>
#include <linux/cpu.h>
+#include <linux/cacheinfo.h>
#include <asm/facility.h>
-struct cache {
- unsigned long size;
- unsigned int line_size;
- unsigned int associativity;
- unsigned int nr_sets;
- unsigned int level : 3;
- unsigned int type : 2;
- unsigned int private : 1;
- struct list_head list;
-};
-
-struct cache_dir {
- struct kobject *kobj;
- struct cache_index_dir *index;
-};
-
-struct cache_index_dir {
- struct kobject kobj;
- int cpu;
- struct cache *cache;
- struct cache_index_dir *next;
-};
-
enum {
CACHE_SCOPE_NOTEXISTS,
CACHE_SCOPE_PRIVATE,
@@ -44,10 +18,10 @@ enum {
};
enum {
- CACHE_TYPE_SEPARATE,
- CACHE_TYPE_DATA,
- CACHE_TYPE_INSTRUCTION,
- CACHE_TYPE_UNIFIED,
+ CTYPE_SEPARATE,
+ CTYPE_DATA,
+ CTYPE_INSTRUCTION,
+ CTYPE_UNIFIED,
};
enum {
@@ -70,37 +44,57 @@ struct cache_info {
};
#define CACHE_MAX_LEVEL 8
-
union cache_topology {
struct cache_info ci[CACHE_MAX_LEVEL];
unsigned long long raw;
};
static const char * const cache_type_string[] = {
- "Data",
+ "",
"Instruction",
+ "Data",
+ "",
"Unified",
};
-static struct cache_dir *cache_dir_cpu[NR_CPUS];
-static LIST_HEAD(cache_list);
+static const enum cache_type cache_type_map[] = {
+ [CTYPE_SEPARATE] = CACHE_TYPE_SEPARATE,
+ [CTYPE_DATA] = CACHE_TYPE_DATA,
+ [CTYPE_INSTRUCTION] = CACHE_TYPE_INST,
+ [CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
+};
void show_cacheinfo(struct seq_file *m)
{
- struct cache *cache;
- int index = 0;
+ struct cpu_cacheinfo *this_cpu_ci;
+ struct cacheinfo *cache;
+ int idx;
- list_for_each_entry(cache, &cache_list, list) {
- seq_printf(m, "cache%-11d: ", index);
+ get_online_cpus();
+ this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
+ for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
+ cache = this_cpu_ci->info_list + idx;
+ seq_printf(m, "cache%-11d: ", idx);
seq_printf(m, "level=%d ", cache->level);
seq_printf(m, "type=%s ", cache_type_string[cache->type]);
- seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared");
- seq_printf(m, "size=%luK ", cache->size >> 10);
- seq_printf(m, "line_size=%u ", cache->line_size);
- seq_printf(m, "associativity=%d", cache->associativity);
+ seq_printf(m, "scope=%s ",
+ cache->disable_sysfs ? "Shared" : "Private");
+ seq_printf(m, "size=%dK ", cache->size >> 10);
+ seq_printf(m, "line_size=%u ", cache->coherency_line_size);
+ seq_printf(m, "associativity=%d", cache->ways_of_associativity);
seq_puts(m, "\n");
- index++;
}
+ put_online_cpus();
+}
+
+static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
+{
+ if (level >= CACHE_MAX_LEVEL)
+ return CACHE_TYPE_NOCACHE;
+ ci += level;
+ if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
+ return CACHE_TYPE_NOCACHE;
+ return cache_type_map[ci->type];
}
static inline unsigned long ecag(int ai, int li, int ti)
@@ -113,277 +107,71 @@ static inline unsigned long ecag(int ai, int li, int ti)
return val;
}
-static int __init cache_add(int level, int private, int type)
+static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
+ enum cache_type type, unsigned int level, int cpu)
{
- struct cache *cache;
- int ti;
+ int ti, num_sets;
- cache = kzalloc(sizeof(*cache), GFP_KERNEL);
- if (!cache)
- return -ENOMEM;
- if (type == CACHE_TYPE_INSTRUCTION)
+ if (type == CACHE_TYPE_INST)
ti = CACHE_TI_INSTRUCTION;
else
ti = CACHE_TI_UNIFIED;
- cache->size = ecag(EXTRACT_SIZE, level, ti);
- cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
- cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
- cache->nr_sets = cache->size / cache->associativity;
- cache->nr_sets /= cache->line_size;
- cache->private = private;
- cache->level = level + 1;
- cache->type = type - 1;
- list_add_tail(&cache->list, &cache_list);
- return 0;
-}
-
-static void __init cache_build_info(void)
-{
- struct cache *cache, *next;
+ this_leaf->level = level + 1;
+ this_leaf->type = type;
+ this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
+ this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
+ this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
+ num_sets = this_leaf->size / this_leaf->coherency_line_size;
+ num_sets /= this_leaf->ways_of_associativity;
+ this_leaf->number_of_sets = num_sets;
+ cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
+ if (!private)
+ this_leaf->disable_sysfs = true;
+}
+
+int init_cache_level(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ unsigned int level = 0, leaves = 0;
union cache_topology ct;
- int level, private, rc;
+ enum cache_type ctype;
+ if (!this_cpu_ci)
+ return -EINVAL;
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
- for (level = 0; level < CACHE_MAX_LEVEL; level++) {
- switch (ct.ci[level].scope) {
- case CACHE_SCOPE_SHARED:
- private = 0;
- break;
- case CACHE_SCOPE_PRIVATE:
- private = 1;
+ do {
+ ctype = get_cache_type(&ct.ci[0], level);
+ if (ctype == CACHE_TYPE_NOCACHE)
break;
- default:
- return;
- }
- if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
- rc = cache_add(level, private, CACHE_TYPE_DATA);
- rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION);
- } else {
- rc = cache_add(level, private, ct.ci[level].type);
- }
- if (rc)
- goto error;
- }
- return;
-error:
- list_for_each_entry_safe(cache, next, &cache_list, list) {
- list_del(&cache->list);
- kfree(cache);
- }
-}
-
-static struct cache_dir *cache_create_cache_dir(int cpu)
-{
- struct cache_dir *cache_dir;
- struct kobject *kobj = NULL;
- struct device *dev;
-
- dev = get_cpu_device(cpu);
- if (!dev)
- goto out;
- kobj = kobject_create_and_add("cache", &dev->kobj);
- if (!kobj)
- goto out;
- cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
- if (!cache_dir)
- goto out;
- cache_dir->kobj = kobj;
- cache_dir_cpu[cpu] = cache_dir;
- return cache_dir;
-out:
- kobject_put(kobj);
- return NULL;
-}
-
-static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
-{
- return container_of(kobj, struct cache_index_dir, kobj);
-}
-
-static void cache_index_release(struct kobject *kobj)
-{
- struct cache_index_dir *index;
-
- index = kobj_to_cache_index_dir(kobj);
- kfree(index);
-}
-
-static ssize_t cache_index_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- struct kobj_attribute *kobj_attr;
-
- kobj_attr = container_of(attr, struct kobj_attribute, attr);
- return kobj_attr->show(kobj, kobj_attr, buf);
-}
-
-#define DEFINE_CACHE_ATTR(_name, _format, _value) \
-static ssize_t cache_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, \
- char *buf) \
-{ \
- struct cache_index_dir *index; \
- \
- index = kobj_to_cache_index_dir(kobj); \
- return sprintf(buf, _format, _value); \
-} \
-static struct kobj_attribute cache_##_name##_attr = \
- __ATTR(_name, 0444, cache_##_name##_show, NULL);
-
-DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
-DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
-DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
-DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
-DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]);
-DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);
-
-static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
-{
- struct cache_index_dir *index;
- int len;
-
- index = kobj_to_cache_index_dir(kobj);
- len = type ?
- cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
- cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
- len += sprintf(&buf[len], "\n");
- return len;
-}
-
-static ssize_t shared_cpu_map_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- return shared_cpu_map_func(kobj, 0, buf);
-}
-static struct kobj_attribute cache_shared_cpu_map_attr =
- __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
-
-static ssize_t shared_cpu_list_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- return shared_cpu_map_func(kobj, 1, buf);
-}
-static struct kobj_attribute cache_shared_cpu_list_attr =
- __ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
-
-static struct attribute *cache_index_default_attrs[] = {
- &cache_type_attr.attr,
- &cache_size_attr.attr,
- &cache_number_of_sets_attr.attr,
- &cache_ways_of_associativity_attr.attr,
- &cache_level_attr.attr,
- &cache_coherency_line_size_attr.attr,
- &cache_shared_cpu_map_attr.attr,
- &cache_shared_cpu_list_attr.attr,
- NULL,
-};
-
-static const struct sysfs_ops cache_index_ops = {
- .show = cache_index_show,
-};
-
-static struct kobj_type cache_index_type = {
- .sysfs_ops = &cache_index_ops,
- .release = cache_index_release,
- .default_attrs = cache_index_default_attrs,
-};
-
-static int cache_create_index_dir(struct cache_dir *cache_dir,
- struct cache *cache, int index, int cpu)
-{
- struct cache_index_dir *index_dir;
- int rc;
-
- index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
- if (!index_dir)
- return -ENOMEM;
- index_dir->cache = cache;
- index_dir->cpu = cpu;
- rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
- cache_dir->kobj, "index%d", index);
- if (rc)
- goto out;
- index_dir->next = cache_dir->index;
- cache_dir->index = index_dir;
- return 0;
-out:
- kfree(index_dir);
- return rc;
-}
-
-static int cache_add_cpu(int cpu)
-{
- struct cache_dir *cache_dir;
- struct cache *cache;
- int rc, index = 0;
-
- if (list_empty(&cache_list))
- return 0;
- cache_dir = cache_create_cache_dir(cpu);
- if (!cache_dir)
- return -ENOMEM;
- list_for_each_entry(cache, &cache_list, list) {
- if (!cache->private)
- break;
- rc = cache_create_index_dir(cache_dir, cache, index, cpu);
- if (rc)
- return rc;
- index++;
- }
+ /* Separate instruction and data caches */
+ leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
+ } while (++level < CACHE_MAX_LEVEL);
+ this_cpu_ci->num_levels = level;
+ this_cpu_ci->num_leaves = leaves;
return 0;
}
-static void cache_remove_cpu(int cpu)
-{
- struct cache_index_dir *index, *next;
- struct cache_dir *cache_dir;
-
- cache_dir = cache_dir_cpu[cpu];
- if (!cache_dir)
- return;
- index = cache_dir->index;
- while (index) {
- next = index->next;
- kobject_put(&index->kobj);
- index = next;
- }
- kobject_put(cache_dir->kobj);
- kfree(cache_dir);
- cache_dir_cpu[cpu] = NULL;
-}
-
-static int cache_hotplug(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
+int populate_cache_leaves(unsigned int cpu)
{
- int cpu = (long)hcpu;
- int rc = 0;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+ unsigned int level, idx, pvt;
+ union cache_topology ct;
+ enum cache_type ctype;
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- rc = cache_add_cpu(cpu);
- if (rc)
- cache_remove_cpu(cpu);
- break;
- case CPU_DEAD:
- cache_remove_cpu(cpu);
- break;
+ ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
+ for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
+ idx < this_cpu_ci->num_leaves; idx++, level++) {
+ if (!this_leaf)
+ return -EINVAL;
+ pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
+ ctype = get_cache_type(&ct.ci[0], level);
+ if (ctype == CACHE_TYPE_SEPARATE) {
+ ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
+ ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
+ } else {
+ ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
+ }
}
- return rc ? NOTIFY_BAD : NOTIFY_OK;
-}
-
-static int __init cache_init(void)
-{
- int cpu;
-
- if (!test_facility(34))
- return 0;
- cache_build_info();
-
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- cache_add_cpu(cpu);
- __hotcpu_notifier(cache_hotplug, 0);
- cpu_notifier_register_done();
return 0;
}
-device_initcall(cache_init);
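For illustration only, a standalone sketch of the leaf-counting rule that init_cache_level() above implements; the enum and helper below are example-only names, not kernel definitions. A split instruction/data level contributes two cacheinfo leaves, a unified level one, and the first missing level ends the walk (so a split L1 plus unified L2..L4 would give 5 leaves):

        enum ex_cache_type { EX_NOCACHE, EX_UNIFIED, EX_SEPARATE };

        /* Count cacheinfo leaves for one CPU, mirroring the loop above. */
        static unsigned int count_cache_leaves(const enum ex_cache_type t[],
                                               unsigned int max_level)
        {
                unsigned int level, leaves = 0;

                for (level = 0; level < max_level; level++) {
                        if (t[level] == EX_NOCACHE)
                                break;
                        /* split I/D level -> two leaves, unified -> one */
                        leaves += (t[level] == EX_SEPARATE) ? 2 : 1;
                }
                return leaves;
        }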
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 34d5fa7b01b5..bc1df12dd4f8 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -209,7 +209,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
int i;
/* Always make any pending restarted system call return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs)))
return -EFAULT;
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index f3762937dd82..533430307da8 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -137,7 +137,7 @@ enum {
INSTR_RSI_RRP,
INSTR_RSL_LRDFU, INSTR_RSL_R0RD,
INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
- INSTR_RSY_RDRM,
+ INSTR_RSY_RDRM, INSTR_RSY_RMRD,
INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
INSTR_RS_RURD,
INSTR_RXE_FRRD, INSTR_RXE_RRRD, INSTR_RXE_RRRDM,
@@ -226,7 +226,6 @@ static const struct s390_operand operands[] =
[U16_32] = { 16, 32, 0 },
[J16_16] = { 16, 16, OPERAND_PCREL },
[J16_32] = { 16, 32, OPERAND_PCREL },
- [I16_32] = { 16, 32, OPERAND_SIGNED },
[I24_24] = { 24, 24, OPERAND_SIGNED },
[J32_16] = { 32, 16, OPERAND_PCREL },
[I32_16] = { 32, 16, OPERAND_SIGNED },
@@ -308,6 +307,7 @@ static const unsigned char formats[][7] = {
[INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },
[INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
[INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
+ [INSTR_RSY_RMRD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
[INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
[INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
[INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 },
@@ -451,7 +451,8 @@ enum {
LONG_INSN_VERLLV,
LONG_INSN_VESRAV,
LONG_INSN_VESRLV,
- LONG_INSN_VSBCBI
+ LONG_INSN_VSBCBI,
+ LONG_INSN_STCCTM
};
static char *long_insn_name[] = {
@@ -531,6 +532,7 @@ static char *long_insn_name[] = {
[LONG_INSN_VESRAV] = "vesrav",
[LONG_INSN_VESRLV] = "vesrlv",
[LONG_INSN_VSBCBI] = "vsbcbi",
+ [LONG_INSN_STCCTM] = "stcctm",
};
static struct s390_insn opcode[] = {
@@ -1656,6 +1658,7 @@ static struct s390_insn opcode_eb[] = {
{ "lric", 0x60, INSTR_RSY_RDRM },
{ "stric", 0x61, INSTR_RSY_RDRM },
{ "mric", 0x62, INSTR_RSY_RDRM },
+ { { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD },
#endif
{ "rll", 0x1d, INSTR_RSY_RRRD },
{ "mvclu", 0x8e, INSTR_RSY_RRRD },
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 302ac1f7f8e7..4427ab7ac23a 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -396,6 +396,26 @@ static __init void detect_machine_facilities(void)
#endif
}
+static int __init cad_setup(char *str)
+{
+ int val;
+
+ get_option(&str, &val);
+ if (val && test_facility(128))
+ S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
+ return 0;
+}
+early_param("cad", cad_setup);
+
+static int __init cad_init(void)
+{
+ if (MACHINE_HAS_CAD)
+ /* Enable problem state CAD. */
+ __ctl_set_bit(2, 3);
+ return 0;
+}
+early_initcall(cad_init);
+
static __init void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
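Going by cad_setup() and cad_init() above (a hedged reading of the code, not text from the patch), the new boot option is consumed like this: a non-zero value sets MACHINE_FLAG_CAD when facility 128 is installed, and the early initcall then sets bit 3 in control register 2 via __ctl_set_bit(2, 3) to allow compare-and-delay in problem state. For example, on the kernel command line:

        cad=1     enable problem-state compare-and-delay (if facility 128 is present)
        cad=0     leave the facility disabled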
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 8e61393c8275..834df047d35f 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -71,9 +71,11 @@ struct s390_mmap_arg_struct;
struct fadvise64_64_args;
struct old_sigaction;
+long sys_rt_sigreturn(void);
+long sys_sigreturn(void);
+
long sys_s390_personality(unsigned int personality);
long sys_s390_runtime_instr(int command, int signum);
-
long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
#endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index b86bb8823f15..6c79f1b44fe7 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -46,10 +46,55 @@
* lg %r14,8(%r15) # offset 18
* The jg instruction branches to offset 24 to skip as many instructions
* as possible.
+ * In case we use gcc's hotpatch feature, the original as well as the disabled
+ * function prologue contains only a single six-byte instruction and looks
+ * like this:
+ * > brcl 0,0 # offset 0
+ * To enable ftrace the code gets patched like above and afterwards looks
+ * like this:
+ * > brasl %r0,ftrace_caller # offset 0
*/
unsigned long ftrace_plt;
+static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
+{
+#ifdef CC_USING_HOTPATCH
+ /* brcl 0,0 */
+ insn->opc = 0xc004;
+ insn->disp = 0;
+#else
+ /* stg r14,8(r15) */
+ insn->opc = 0xe3e0;
+ insn->disp = 0xf0080024;
+#endif
+}
+
+static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+ if (insn->opc == BREAKPOINT_INSTRUCTION)
+ return 1;
+#endif
+ return 0;
+}
+
+static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+ insn->opc = BREAKPOINT_INSTRUCTION;
+ insn->disp = KPROBE_ON_FTRACE_NOP;
+#endif
+}
+
+static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+ insn->opc = BREAKPOINT_INSTRUCTION;
+ insn->disp = KPROBE_ON_FTRACE_CALL;
+#endif
+}
+
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
@@ -59,62 +104,62 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
- struct ftrace_insn insn;
- unsigned short op;
- void *from, *to;
- size_t size;
-
- ftrace_generate_nop_insn(&insn);
- size = sizeof(insn);
- from = &insn;
- to = (void *) rec->ip;
- if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
+ struct ftrace_insn orig, new, old;
+
+ if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
return -EFAULT;
- /*
- * If we find a breakpoint instruction, a kprobe has been placed
- * at the beginning of the function. We write the constant
- * KPROBE_ON_FTRACE_NOP into the remaining four bytes of the original
- * instruction so that the kprobes handler can execute a nop, if it
- * reaches this breakpoint.
- */
- if (op == BREAKPOINT_INSTRUCTION) {
- size -= 2;
- from += 2;
- to += 2;
- insn.disp = KPROBE_ON_FTRACE_NOP;
+ if (addr == MCOUNT_ADDR) {
+ /* Initial code replacement */
+ ftrace_generate_orig_insn(&orig);
+ ftrace_generate_nop_insn(&new);
+ } else if (is_kprobe_on_ftrace(&old)) {
+ /*
+ * If we find a breakpoint instruction, a kprobe has been
+ * placed at the beginning of the function. We write the
+ * constant KPROBE_ON_FTRACE_NOP into the remaining four
+ * bytes of the original instruction so that the kprobes
+ * handler can execute a nop, if it reaches this breakpoint.
+ */
+ ftrace_generate_kprobe_call_insn(&orig);
+ ftrace_generate_kprobe_nop_insn(&new);
+ } else {
+ /* Replace ftrace call with a nop. */
+ ftrace_generate_call_insn(&orig, rec->ip);
+ ftrace_generate_nop_insn(&new);
}
- if (probe_kernel_write(to, from, size))
+ /* Verify that the code to be replaced matches what we expect. */
+ if (memcmp(&orig, &old, sizeof(old)))
+ return -EINVAL;
+ if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
return -EPERM;
return 0;
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
- struct ftrace_insn insn;
- unsigned short op;
- void *from, *to;
- size_t size;
-
- ftrace_generate_call_insn(&insn, rec->ip);
- size = sizeof(insn);
- from = &insn;
- to = (void *) rec->ip;
- if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
+ struct ftrace_insn orig, new, old;
+
+ if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
return -EFAULT;
- /*
- * If we find a breakpoint instruction, a kprobe has been placed
- * at the beginning of the function. We write the constant
- * KPROBE_ON_FTRACE_CALL into the remaining four bytes of the original
- * instruction so that the kprobes handler can execute a brasl if it
- * reaches this breakpoint.
- */
- if (op == BREAKPOINT_INSTRUCTION) {
- size -= 2;
- from += 2;
- to += 2;
- insn.disp = KPROBE_ON_FTRACE_CALL;
+ if (is_kprobe_on_ftrace(&old)) {
+ /*
+ * If we find a breakpoint instruction, a kprobe has been
+ * placed at the beginning of the function. We write the
+ * constant KPROBE_ON_FTRACE_CALL into the remaining four
+ * bytes of the original instruction so that the kprobes
+ * handler can execute a brasl if it reaches this breakpoint.
+ */
+ ftrace_generate_kprobe_nop_insn(&orig);
+ ftrace_generate_kprobe_call_insn(&new);
+ } else {
+ /* Replace nop with an ftrace call. */
+ ftrace_generate_nop_insn(&orig);
+ ftrace_generate_call_insn(&new, rec->ip);
}
- if (probe_kernel_write(to, from, size))
+ /* Verify that the code to be replaced matches what we expect. */
+ if (memcmp(&orig, &old, sizeof(old)))
+ return -EINVAL;
+ if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
return -EPERM;
return 0;
}
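Both ftrace_make_nop() and ftrace_make_call() above follow the same verify-before-patch pattern. A minimal sketch of that pattern (an example helper, assuming only the probe_kernel_read()/probe_kernel_write() primitives already used in the hunk):

        static int patch_text(void *ip, const void *expected,
                              const void *replacement, size_t len)
        {
                unsigned char cur[16];

                if (len > sizeof(cur))
                        return -EINVAL;
                /* read what is really present in the kernel text */
                if (probe_kernel_read(cur, ip, len))
                        return -EFAULT;
                /* refuse to patch if it is not the expected instruction */
                if (memcmp(expected, cur, len))
                        return -EINVAL;
                return probe_kernel_write(ip, replacement, len) ? -EPERM : 0;
        }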
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index d62eee11f0b5..132f4c9ade60 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -436,7 +436,9 @@ ENTRY(startup_kdump)
# followed by the facility words.
#if defined(CONFIG_64BIT)
-#if defined(CONFIG_MARCH_ZEC12)
+#if defined(CONFIG_MARCH_Z13)
+ .long 3, 0xc100eff2, 0xf46ce800, 0x00400000
+#elif defined(CONFIG_MARCH_ZEC12)
.long 3, 0xc100eff2, 0xf46ce800, 0x00400000
#elif defined(CONFIG_MARCH_Z196)
.long 2, 0xc100eff2, 0xf46c0000
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 39badb9ca0b3..5c8651f36509 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2074,7 +2074,8 @@ static void do_reset_calls(void)
u32 dump_prefix_page;
-void s390_reset_system(void (*func)(void *), void *data)
+void s390_reset_system(void (*fn_pre)(void),
+ void (*fn_post)(void *), void *data)
{
struct _lowcore *lc;
@@ -2112,7 +2113,11 @@ void s390_reset_system(void (*func)(void *), void *data)
/* Store status at absolute zero */
store_status();
+ /* Call function before reset */
+ if (fn_pre)
+ fn_pre();
do_reset_calls();
- if (func)
- func(data);
+ /* Call function after reset */
+ if (fn_post)
+ fn_post(data);
}
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index b987ab2c1541..830066f936c8 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -22,31 +22,70 @@ struct insn_args {
enum jump_label_type type;
};
+static void jump_label_make_nop(struct jump_entry *entry, struct insn *insn)
+{
+ /* brcl 0,0 */
+ insn->opcode = 0xc004;
+ insn->offset = 0;
+}
+
+static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
+{
+ /* brcl 15,offset */
+ insn->opcode = 0xc0f4;
+ insn->offset = (entry->target - entry->code) >> 1;
+}
+
+static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
+ struct insn *new)
+{
+ unsigned char *ipc = (unsigned char *)entry->code;
+ unsigned char *ipe = (unsigned char *)expected;
+ unsigned char *ipn = (unsigned char *)new;
+
+ pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
+ pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n",
+ ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]);
+ pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n",
+ ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]);
+ pr_emerg("New: %02x %02x %02x %02x %02x %02x\n",
+ ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]);
+ panic("Corrupted kernel text");
+}
+
+static struct insn orignop = {
+ .opcode = 0xc004,
+ .offset = JUMP_LABEL_NOP_OFFSET >> 1,
+};
+
static void __jump_label_transform(struct jump_entry *entry,
- enum jump_label_type type)
+ enum jump_label_type type,
+ int init)
{
- struct insn insn;
- int rc;
+ struct insn old, new;
if (type == JUMP_LABEL_ENABLE) {
- /* brcl 15,offset */
- insn.opcode = 0xc0f4;
- insn.offset = (entry->target - entry->code) >> 1;
+ jump_label_make_nop(entry, &old);
+ jump_label_make_branch(entry, &new);
} else {
- /* brcl 0,0 */
- insn.opcode = 0xc004;
- insn.offset = 0;
+ jump_label_make_branch(entry, &old);
+ jump_label_make_nop(entry, &new);
}
-
- rc = probe_kernel_write((void *)entry->code, &insn, JUMP_LABEL_NOP_SIZE);
- WARN_ON_ONCE(rc < 0);
+ if (init) {
+ if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
+ jump_label_bug(entry, &orignop, &new);
+ } else {
+ if (memcmp((void *)entry->code, &old, sizeof(old)))
+ jump_label_bug(entry, &old, &new);
+ }
+ probe_kernel_write((void *)entry->code, &new, sizeof(new));
}
static int __sm_arch_jump_label_transform(void *data)
{
struct insn_args *args = data;
- __jump_label_transform(args->entry, args->type);
+ __jump_label_transform(args->entry, args->type, 0);
return 0;
}
@@ -64,7 +103,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
void arch_jump_label_transform_static(struct jump_entry *entry,
enum jump_label_type type)
{
- __jump_label_transform(entry, type);
+ __jump_label_transform(entry, type, 1);
}
#endif
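As a worked example of the offset calculation in jump_label_make_branch() above (addresses are made up): brcl encodes a signed 32-bit offset counted in halfwords, relative to the instruction itself.

        unsigned long code   = 0x1000;          /* address of the brcl          */
        unsigned long target = 0x100c;          /* branch target, 12 bytes away */
        int offset = (target - code) >> 1;      /* 12 bytes -> 6 halfwords      */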
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 1e4c710dfb92..f516edc1fbe3 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -69,7 +69,8 @@ static void copy_instruction(struct kprobe *p)
/*
* If kprobes patches the instruction that is morphed by
* ftrace make sure that kprobes always sees the branch
- * "jg .+24" that skips the mcount block
+ * "jg .+24" that skips the mcount block or the "brcl 0,0"
+ * in case of hotpatch.
*/
ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
p->ainsn.is_ftrace_insn = 1;
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 4685337fa7c6..fb0901ec4306 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -103,21 +103,18 @@ static int __init machine_kdump_pm_init(void)
return 0;
}
arch_initcall(machine_kdump_pm_init);
-#endif
/*
* Start kdump: We expect here that a store status has been done on our CPU
*/
static void __do_machine_kdump(void *image)
{
-#ifdef CONFIG_CRASH_DUMP
int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;
- setup_regs();
__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
start_kdump(1);
-#endif
}
+#endif
/*
* Check if kdump checksums are valid: We call purgatory with parameter "0"
@@ -249,18 +246,18 @@ static void __do_machine_kexec(void *data)
*/
static void __machine_kexec(void *data)
{
- struct kimage *image = data;
-
__arch_local_irq_stosm(0x04); /* enable DAT */
pfault_fini();
tracing_off();
debug_locks_off();
- if (image->type == KEXEC_TYPE_CRASH) {
+#ifdef CONFIG_CRASH_DUMP
+ if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH) {
+
lgr_info_log();
- s390_reset_system(__do_machine_kdump, data);
- } else {
- s390_reset_system(__do_machine_kexec, data);
- }
+ s390_reset_system(setup_regs, __do_machine_kdump, data);
+ } else
+#endif
+ s390_reset_system(NULL, __do_machine_kexec, data);
disabled_wait((unsigned long) __builtin_return_address(0));
}
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index b6dfc5bfcb89..e499370fbccb 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -27,7 +27,9 @@ ENTRY(ftrace_caller)
.globl ftrace_regs_caller
.set ftrace_regs_caller,ftrace_caller
lgr %r1,%r15
+#ifndef CC_USING_HOTPATCH
aghi %r0,MCOUNT_RETURN_FIXUP
+#endif
aghi %r15,-STACK_FRAME_SIZE
stg %r1,__SF_BACKCHAIN(%r15)
stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 409d152585be..2ca95862e336 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -50,7 +50,7 @@ void *module_alloc(unsigned long size)
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
+ GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
#endif
@@ -436,6 +436,7 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
+ jump_label_apply_nops(me);
vfree(me->arch.syminfo);
me->arch.syminfo = NULL;
return 0;
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index c3f8d157cb0d..e6a1578fc000 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1415,7 +1415,7 @@ CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
static struct attribute *cpumsf_pmu_events_attr[] = {
CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
- CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG),
+ NULL,
NULL,
};
@@ -1606,8 +1606,11 @@ static int __init init_cpum_sampling_pmu(void)
return -EINVAL;
}
- if (si.ad)
+ if (si.ad) {
sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
+ cpumsf_pmu_events_attr[1] =
+ CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
+ }
sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
if (!sfdbg)
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index aa7a83948c7b..13fc0978ca7e 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -79,6 +79,14 @@ void release_thread(struct task_struct *dead_task)
{
}
+#ifdef CONFIG_64BIT
+void arch_release_task_struct(struct task_struct *tsk)
+{
+ if (tsk->thread.vxrs)
+ kfree(tsk->thread.vxrs);
+}
+#endif
+
int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
unsigned long arg, struct task_struct *p)
{
@@ -243,13 +251,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
ret = PAGE_ALIGN(mm->brk + brk_rnd());
return (ret > mm->brk) ? ret : mm->brk;
}
-
-unsigned long randomize_et_dyn(unsigned long base)
-{
- unsigned long ret;
-
- if (!(current->flags & PF_RANDOMIZE))
- return base;
- ret = PAGE_ALIGN(base + brk_rnd());
- return (ret > base) ? ret : base;
-}
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index dbdd33ee0102..dc488e13b7e3 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -8,16 +8,24 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/elf.h>
#include <asm/lowcore.h>
#include <asm/param.h>
+#include <asm/smp.h>
static DEFINE_PER_CPU(struct cpuid, cpu_id);
+void notrace cpu_relax(void)
+{
+ if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
+ asm volatile("diag 0,0,0x44");
+ barrier();
+}
+EXPORT_SYMBOL(cpu_relax);
+
/*
* cpu_init - initializes state that is per-CPU.
*/
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index a41f2c99dcc8..7e77e03378f3 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -294,7 +294,8 @@ ENTRY(_sclp_print_early)
#ifdef CONFIG_64BIT
tm LC_AR_MODE_ID,1
jno .Lesa3
- lmh %r6,%r15,96(%r15) # store upper register halves
+ lgfr %r2,%r2 # sign extend return value
+ lmh %r6,%r15,96(%r15) # restore upper register halves
ahi %r15,80
.Lesa3:
#endif
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 4e532c67832f..a5ea8bc17cb3 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -810,6 +810,9 @@ static void __init setup_hwcaps(void)
case 0x2828:
strcpy(elf_platform, "zEC12");
break;
+ case 0x2964:
+ strcpy(elf_platform, "z13");
+ break;
}
}
@@ -906,7 +909,6 @@ void __init setup_arch(char **cmdline_p)
setup_lowcore();
smp_fill_possible_mask();
cpu_init();
- s390_init_cpu_topology();
/*
* Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 6a2ac257d98f..b3ae6f70c6d6 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -162,7 +162,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
_sigregs user_sregs;
/* Always make any pending restarted system call return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
if (__copy_from_user(&user_sregs, sregs, sizeof(user_sregs)))
return -EFAULT;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 0b499f5cbe19..db8f1115a3bf 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -59,21 +59,41 @@ enum {
CPU_STATE_CONFIGURED,
};
+static DEFINE_PER_CPU(struct cpu *, cpu_device);
+
struct pcpu {
- struct cpu *cpu;
struct _lowcore *lowcore; /* lowcore page(s) for the cpu */
- unsigned long async_stack; /* async stack for the cpu */
- unsigned long panic_stack; /* panic stack for the cpu */
unsigned long ec_mask; /* bit mask for ec_xxx functions */
- int state; /* physical cpu state */
- int polarization; /* physical polarization */
+ signed char state; /* physical cpu state */
+ signed char polarization; /* physical polarization */
u16 address; /* physical cpu address */
};
static u8 boot_cpu_type;
-static u16 boot_cpu_address;
static struct pcpu pcpu_devices[NR_CPUS];
+unsigned int smp_cpu_mt_shift;
+EXPORT_SYMBOL(smp_cpu_mt_shift);
+
+unsigned int smp_cpu_mtid;
+EXPORT_SYMBOL(smp_cpu_mtid);
+
+static unsigned int smp_max_threads __initdata = -1U;
+
+static int __init early_nosmt(char *s)
+{
+ smp_max_threads = 1;
+ return 0;
+}
+early_param("nosmt", early_nosmt);
+
+static int __init early_smt(char *s)
+{
+ get_option(&s, &smp_max_threads);
+ return 0;
+}
+early_param("smt", early_smt);
+
/*
* The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
@@ -132,7 +152,7 @@ static inline int pcpu_running(struct pcpu *pcpu)
/*
* Find struct pcpu by cpu address.
*/
-static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
+static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
int cpu;
@@ -152,25 +172,30 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
pcpu_sigp_retry(pcpu, order, 0);
}
+#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
+#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
+
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
+ unsigned long async_stack, panic_stack;
struct _lowcore *lc;
if (pcpu != &pcpu_devices[0]) {
pcpu->lowcore = (struct _lowcore *)
__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
- pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
- pcpu->panic_stack = __get_free_page(GFP_KERNEL);
- if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
+ async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+ panic_stack = __get_free_page(GFP_KERNEL);
+ if (!pcpu->lowcore || !panic_stack || !async_stack)
goto out;
+ } else {
+ async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
+ panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
}
lc = pcpu->lowcore;
memcpy(lc, &S390_lowcore, 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
- lc->async_stack = pcpu->async_stack + ASYNC_SIZE
- - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
- lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
- - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
+ lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
+ lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
#ifndef CONFIG_64BIT
@@ -191,8 +216,8 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
return 0;
out:
if (pcpu != &pcpu_devices[0]) {
- free_page(pcpu->panic_stack);
- free_pages(pcpu->async_stack, ASYNC_ORDER);
+ free_page(panic_stack);
+ free_pages(async_stack, ASYNC_ORDER);
free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
}
return -ENOMEM;
@@ -214,11 +239,11 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
#else
vdso_free_per_cpu(pcpu->lowcore);
#endif
- if (pcpu != &pcpu_devices[0]) {
- free_page(pcpu->panic_stack);
- free_pages(pcpu->async_stack, ASYNC_ORDER);
- free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
- }
+ if (pcpu == &pcpu_devices[0])
+ return;
+ free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
+ free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
+ free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
}
#endif /* CONFIG_HOTPLUG_CPU */
@@ -299,6 +324,32 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
}
/*
+ * Enable additional logical cpus for multi-threading.
+ */
+static int pcpu_set_smt(unsigned int mtid)
+{
+ register unsigned long reg1 asm ("1") = (unsigned long) mtid;
+ int cc;
+
+ if (smp_cpu_mtid == mtid)
+ return 0;
+ asm volatile(
+ " sigp %1,0,%2 # sigp set multi-threading\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (cc) : "d" (reg1), "K" (SIGP_SET_MULTI_THREADING)
+ : "cc");
+ if (cc == 0) {
+ smp_cpu_mtid = mtid;
+ smp_cpu_mt_shift = 0;
+ while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
+ smp_cpu_mt_shift++;
+ pcpu_devices[0].address = stap();
+ }
+ return cc;
+}
+
+/*
* Call function on an online CPU.
*/
void smp_call_online_cpu(void (*func)(void *), void *data)
@@ -319,7 +370,8 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
pcpu_delegate(&pcpu_devices[0], func, data,
- pcpu_devices->panic_stack + PAGE_SIZE);
+ pcpu_devices->lowcore->panic_stack -
+ PANIC_FRAME_OFFSET + PAGE_SIZE);
}
int smp_find_processor_id(u16 address)
@@ -512,22 +564,17 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
#ifdef CONFIG_CRASH_DUMP
-static void __init smp_get_save_area(int cpu, u16 address)
+static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu)
{
void *lc = pcpu_devices[0].lowcore;
struct save_area_ext *sa_ext;
unsigned long vx_sa;
- if (is_kdump_kernel())
- return;
- if (!OLDMEM_BASE && (address == boot_cpu_address ||
- ipl_info.type != IPL_TYPE_FCP_DUMP))
- return;
sa_ext = dump_save_area_create(cpu);
if (!sa_ext)
panic("could not allocate memory for save area\n");
- if (address == boot_cpu_address) {
- /* Copy the registers of the boot cpu. */
+ if (is_boot_cpu) {
+ /* Copy the registers of the boot CPU. */
copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
SAVE_AREA_BASE - PAGE_SIZE, 0);
if (MACHINE_HAS_VX)
@@ -548,6 +595,64 @@ static void __init smp_get_save_area(int cpu, u16 address)
free_page(vx_sa);
}
+/*
+ * Collect CPU state of the previous, crashed system.
+ * There are four cases:
+ * 1) standard zfcp dump
+ * condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
+ * The state for all CPUs except the boot CPU needs to be collected
+ * with sigp stop-and-store-status. The boot CPU state is located in
+ * the absolute lowcore of the memory stored in the HSA. The zcore code
+ * will allocate the save area and copy the boot CPU state from the HSA.
+ * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
+ * condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
+ * The state for all CPUs except the boot CPU needs to be collected
+ * with sigp stop-and-store-status. The firmware or the boot-loader
+ * stored the registers of the boot CPU in the absolute lowcore in the
+ * memory of the old system.
+ * 3) kdump and the old kernel did not store the CPU state,
+ * or stand-alone kdump for DASD
+ * condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
+ * The state for all CPUs except the boot CPU needs to be collected
+ * with sigp stop-and-store-status. The kexec code or the boot-loader
+ * stored the registers of the boot CPU in the memory of the old system.
+ * 4) kdump and the old kernel stored the CPU state
+ * condition: OLDMEM_BASE != NULL && is_kdump_kernel()
+ * The state of all CPUs is stored in ELF sections in the memory of the
+ * old system. The ELF sections are picked up by the crash_dump code
+ * via elfcorehdr_addr.
+ */
+static void __init smp_store_cpu_states(struct sclp_cpu_info *info)
+{
+ unsigned int cpu, address, i, j;
+ int is_boot_cpu;
+
+ if (is_kdump_kernel())
+ /* Previous system stored the CPU states. Nothing to do. */
+ return;
+ if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
+ /* No previous system present, normal boot. */
+ return;
+ /* Set multi-threading state to that of the previous system. */
+ pcpu_set_smt(sclp_get_mtid_prev());
+ /* Collect CPU states. */
+ cpu = 0;
+ for (i = 0; i < info->configured; i++) {
+ /* Skip CPUs with different CPU type. */
+ if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
+ continue;
+ for (j = 0; j <= smp_cpu_mtid; j++, cpu++) {
+ address = (info->cpu[i].core_id << smp_cpu_mt_shift) + j;
+ is_boot_cpu = (address == pcpu_devices[0].address);
+ if (is_boot_cpu && !OLDMEM_BASE)
+ /* Skip boot CPU for standard zfcp dump. */
+ continue;
+ /* Get state for this CPU. */
+ __smp_store_cpu_state(cpu, address, is_boot_cpu);
+ }
+ }
+}
+
int smp_store_status(int cpu)
{
unsigned long vx_sa;
@@ -565,10 +670,6 @@ int smp_store_status(int cpu)
return 0;
}
-#else /* CONFIG_CRASH_DUMP */
-
-static inline void smp_get_save_area(int cpu, u16 address) { }
-
#endif /* CONFIG_CRASH_DUMP */
void smp_cpu_set_polarization(int cpu, int val)
@@ -590,11 +691,13 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
use_sigp_detection = 1;
- for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
+ for (address = 0; address <= MAX_CPU_ADDRESS;
+ address += (1U << smp_cpu_mt_shift)) {
if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
SIGP_CC_NOT_OPERATIONAL)
continue;
- info->cpu[info->configured].address = address;
+ info->cpu[info->configured].core_id =
+ address >> smp_cpu_mt_shift;
info->configured++;
}
info->combined = info->configured;
@@ -608,7 +711,8 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
{
struct pcpu *pcpu;
cpumask_t avail;
- int cpu, nr, i;
+ int cpu, nr, i, j;
+ u16 address;
nr = 0;
cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
@@ -616,51 +720,76 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
continue;
- if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
- continue;
- pcpu = pcpu_devices + cpu;
- pcpu->address = info->cpu[i].address;
- pcpu->state = (i >= info->configured) ?
- CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
- smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
- set_cpu_present(cpu, true);
- if (sysfs_add && smp_add_present_cpu(cpu) != 0)
- set_cpu_present(cpu, false);
- else
- nr++;
- cpu = cpumask_next(cpu, &avail);
+ address = info->cpu[i].core_id << smp_cpu_mt_shift;
+ for (j = 0; j <= smp_cpu_mtid; j++) {
+ if (pcpu_find_address(cpu_present_mask, address + j))
+ continue;
+ pcpu = pcpu_devices + cpu;
+ pcpu->address = address + j;
+ pcpu->state =
+ (cpu >= info->configured*(smp_cpu_mtid + 1)) ?
+ CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
+ smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+ set_cpu_present(cpu, true);
+ if (sysfs_add && smp_add_present_cpu(cpu) != 0)
+ set_cpu_present(cpu, false);
+ else
+ nr++;
+ cpu = cpumask_next(cpu, &avail);
+ if (cpu >= nr_cpu_ids)
+ break;
+ }
}
return nr;
}
static void __init smp_detect_cpus(void)
{
- unsigned int cpu, c_cpus, s_cpus;
+ unsigned int cpu, mtid, c_cpus, s_cpus;
struct sclp_cpu_info *info;
+ u16 address;
+ /* Get CPU information */
info = smp_get_cpu_info();
if (!info)
panic("smp_detect_cpus failed to allocate memory\n");
+
+ /* Find boot CPU type */
if (info->has_cpu_type) {
- for (cpu = 0; cpu < info->combined; cpu++) {
- if (info->cpu[cpu].address != boot_cpu_address)
- continue;
- /* The boot cpu dictates the cpu type. */
- boot_cpu_type = info->cpu[cpu].type;
- break;
- }
+ address = stap();
+ for (cpu = 0; cpu < info->combined; cpu++)
+ if (info->cpu[cpu].core_id == address) {
+ /* The boot cpu dictates the cpu type. */
+ boot_cpu_type = info->cpu[cpu].type;
+ break;
+ }
+ if (cpu >= info->combined)
+ panic("Could not find boot CPU type");
}
+
+#ifdef CONFIG_CRASH_DUMP
+ /* Collect CPU state of previous system */
+ smp_store_cpu_states(info);
+#endif
+
+ /* Set multi-threading state for the current system */
+ mtid = sclp_get_mtid(boot_cpu_type);
+ mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
+ pcpu_set_smt(mtid);
+
+ /* Print number of CPUs */
c_cpus = s_cpus = 0;
for (cpu = 0; cpu < info->combined; cpu++) {
if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
continue;
- if (cpu < info->configured) {
- smp_get_save_area(c_cpus, info->cpu[cpu].address);
- c_cpus++;
- } else
- s_cpus++;
+ if (cpu < info->configured)
+ c_cpus += smp_cpu_mtid + 1;
+ else
+ s_cpus += smp_cpu_mtid + 1;
}
pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
+
+ /* Add CPUs present at boot */
get_online_cpus();
__smp_rescan_cpus(info, 0);
put_online_cpus();
@@ -696,12 +825,23 @@ static void smp_start_secondary(void *cpuvoid)
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
struct pcpu *pcpu;
- int rc;
+ int base, i, rc;
pcpu = pcpu_devices + cpu;
if (pcpu->state != CPU_STATE_CONFIGURED)
return -EIO;
- if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
+ base = cpu - (cpu % (smp_cpu_mtid + 1));
+ for (i = 0; i <= smp_cpu_mtid; i++) {
+ if (base + i < nr_cpu_ids)
+ if (cpu_online(base + i))
+ break;
+ }
+ /*
+ * If this is the first CPU of the core to get online
+ * do an initial CPU reset.
+ */
+ if (i > smp_cpu_mtid &&
+ pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
@@ -774,7 +914,8 @@ void __init smp_fill_possible_mask(void)
{
unsigned int possible, sclp, cpu;
- sclp = sclp_get_max_cpu() ?: nr_cpu_ids;
+ sclp = min(smp_max_threads, sclp_get_mtid_max() + 1);
+ sclp = sclp_get_max_cpu()*sclp ?: nr_cpu_ids;
possible = setup_possible_cpus ?: nr_cpu_ids;
possible = min(possible, sclp);
for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
@@ -796,14 +937,9 @@ void __init smp_prepare_boot_cpu(void)
{
struct pcpu *pcpu = pcpu_devices;
- boot_cpu_address = stap();
pcpu->state = CPU_STATE_CONFIGURED;
- pcpu->address = boot_cpu_address;
+ pcpu->address = stap();
pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
- pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
- + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
- pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
- + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
S390_lowcore.percpu_offset = __per_cpu_offset[0];
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
set_cpu_present(0, true);
@@ -848,7 +984,7 @@ static ssize_t cpu_configure_store(struct device *dev,
const char *buf, size_t count)
{
struct pcpu *pcpu;
- int cpu, val, rc;
+ int cpu, val, rc, i;
char delim;
if (sscanf(buf, "%d %c", &val, &delim) != 1)
@@ -860,29 +996,43 @@ static ssize_t cpu_configure_store(struct device *dev,
rc = -EBUSY;
/* disallow configuration changes of online cpus and cpu 0 */
cpu = dev->id;
- if (cpu_online(cpu) || cpu == 0)
+ cpu -= cpu % (smp_cpu_mtid + 1);
+ if (cpu == 0)
goto out;
+ for (i = 0; i <= smp_cpu_mtid; i++)
+ if (cpu_online(cpu + i))
+ goto out;
pcpu = pcpu_devices + cpu;
rc = 0;
switch (val) {
case 0:
if (pcpu->state != CPU_STATE_CONFIGURED)
break;
- rc = sclp_cpu_deconfigure(pcpu->address);
+ rc = sclp_cpu_deconfigure(pcpu->address >> smp_cpu_mt_shift);
if (rc)
break;
- pcpu->state = CPU_STATE_STANDBY;
- smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+ for (i = 0; i <= smp_cpu_mtid; i++) {
+ if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
+ continue;
+ pcpu[i].state = CPU_STATE_STANDBY;
+ smp_cpu_set_polarization(cpu + i,
+ POLARIZATION_UNKNOWN);
+ }
topology_expect_change();
break;
case 1:
if (pcpu->state != CPU_STATE_STANDBY)
break;
- rc = sclp_cpu_configure(pcpu->address);
+ rc = sclp_cpu_configure(pcpu->address >> smp_cpu_mt_shift);
if (rc)
break;
- pcpu->state = CPU_STATE_CONFIGURED;
- smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+ for (i = 0; i <= smp_cpu_mtid; i++) {
+ if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
+ continue;
+ pcpu[i].state = CPU_STATE_CONFIGURED;
+ smp_cpu_set_polarization(cpu + i,
+ POLARIZATION_UNKNOWN);
+ }
topology_expect_change();
break;
default:
@@ -929,8 +1079,7 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
void *hcpu)
{
unsigned int cpu = (unsigned int)(long)hcpu;
- struct cpu *c = pcpu_devices[cpu].cpu;
- struct device *s = &c->dev;
+ struct device *s = &per_cpu(cpu_device, cpu)->dev;
int err = 0;
switch (action & ~CPU_TASKS_FROZEN) {
@@ -953,7 +1102,7 @@ static int smp_add_present_cpu(int cpu)
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return -ENOMEM;
- pcpu_devices[cpu].cpu = c;
+ per_cpu(cpu_device, cpu) = c;
s = &c->dev;
c->hotpluggable = 1;
rc = register_cpu(c, cpu);
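A standalone sketch of the core/thread addressing introduced above (example helpers, not patch code): the shift is the smallest power of two that covers the maximum thread id, and a physical CPU address is the core id shifted by that amount plus the thread number.

        static unsigned int mt_shift(unsigned int mtid)
        {
                unsigned int shift = 0;

                while (mtid >= (1U << shift))   /* smallest shift with mtid < 2^shift */
                        shift++;
                return shift;
        }

        static unsigned int cpu_address(unsigned int core_id, unsigned int thread,
                                        unsigned int shift)
        {
                return (core_id << shift) + thread;
        }

With mtid = 1 (two threads per core) the shift is 1, so core 5, thread 1 maps to address 11, matching the core_id << smp_cpu_mt_shift arithmetic used in smp_get_cpu_info() and __smp_rescan_cpus().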
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index 6b09fdffbd2f..ca6294645dd3 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -177,6 +177,17 @@ restart_entry:
lhi %r1,1
sigp %r1,%r0,SIGP_SET_ARCHITECTURE
sam64
+#ifdef CONFIG_SMP
+ larl %r1,smp_cpu_mt_shift
+ icm %r1,15,0(%r1)
+ jz smt_done
+ llgfr %r1,%r1
+smt_loop:
+ sigp %r1,%r0,SIGP_SET_MULTI_THREADING
+ brc 8,smt_done /* accepted */
+ brc 2,smt_loop /* busy, try again */
+smt_done:
+#endif
larl %r1,.Lnew_pgm_check_psw
lpswe 0(%r1)
pgm_check_entry:
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 811f542b8ed4..99babea026ca 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -194,6 +194,41 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
seq_printf(m, "LPAR CPUs Reserved: %d\n", info->cpus_reserved);
seq_printf(m, "LPAR CPUs Dedicated: %d\n", info->cpus_dedicated);
seq_printf(m, "LPAR CPUs Shared: %d\n", info->cpus_shared);
+ if (info->mt_installed & 0x80) {
+ seq_printf(m, "LPAR CPUs G-MTID: %d\n",
+ info->mt_general & 0x1f);
+ seq_printf(m, "LPAR CPUs S-MTID: %d\n",
+ info->mt_installed & 0x1f);
+ seq_printf(m, "LPAR CPUs PS-MTID: %d\n",
+ info->mt_psmtid & 0x1f);
+ }
+}
+
+static void print_ext_name(struct seq_file *m, int lvl,
+ struct sysinfo_3_2_2 *info)
+{
+ if (info->vm[lvl].ext_name_encoding == 0)
+ return;
+ if (info->ext_names[lvl][0] == 0)
+ return;
+ switch (info->vm[lvl].ext_name_encoding) {
+ case 1: /* EBCDIC */
+ EBCASC(info->ext_names[lvl], sizeof(info->ext_names[lvl]));
+ break;
+ case 2: /* UTF-8 */
+ break;
+ default:
+ return;
+ }
+ seq_printf(m, "VM%02d Extended Name: %-.256s\n", lvl,
+ info->ext_names[lvl]);
+}
+
+static void print_uuid(struct seq_file *m, int i, struct sysinfo_3_2_2 *info)
+{
+ if (!memcmp(&info->vm[i].uuid, &NULL_UUID_BE, sizeof(uuid_be)))
+ return;
+ seq_printf(m, "VM%02d UUID: %pUb\n", i, &info->vm[i].uuid);
}
static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info)
@@ -213,6 +248,8 @@ static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info)
seq_printf(m, "VM%02d CPUs Configured: %d\n", i, info->vm[i].cpus_configured);
seq_printf(m, "VM%02d CPUs Standby: %d\n", i, info->vm[i].cpus_standby);
seq_printf(m, "VM%02d CPUs Reserved: %d\n", i, info->vm[i].cpus_reserved);
+ print_ext_name(m, i, info);
+ print_uuid(m, i, info);
}
}
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 20660dddb2d6..170ddd2018b3 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -215,20 +215,20 @@ void update_vsyscall(struct timekeeper *tk)
{
u64 nsecps;
- if (tk->tkr.clock != &clocksource_tod)
+ if (tk->tkr_mono.clock != &clocksource_tod)
return;
/* Make userspace gettimeofday spin until we're done. */
++vdso_data->tb_update_count;
smp_wmb();
- vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
+ vdso_data->xtime_tod_stamp = tk->tkr_mono.cycle_last;
vdso_data->xtime_clock_sec = tk->xtime_sec;
- vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
+ vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
vdso_data->wtom_clock_sec =
tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
- vdso_data->wtom_clock_nsec = tk->tkr.xtime_nsec +
- + ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr.shift);
- nsecps = (u64) NSEC_PER_SEC << tk->tkr.shift;
+ vdso_data->wtom_clock_nsec = tk->tkr_mono.xtime_nsec +
+ + ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
+ nsecps = (u64) NSEC_PER_SEC << tk->tkr_mono.shift;
while (vdso_data->wtom_clock_nsec >= nsecps) {
vdso_data->wtom_clock_nsec -= nsecps;
vdso_data->wtom_clock_sec++;
@@ -236,7 +236,7 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->xtime_coarse_sec = tk->xtime_sec;
vdso_data->xtime_coarse_nsec =
- (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
+ (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
vdso_data->wtom_coarse_sec =
vdso_data->xtime_coarse_sec + tk->wall_to_monotonic.tv_sec;
vdso_data->wtom_coarse_nsec =
@@ -246,8 +246,8 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->wtom_coarse_sec++;
}
- vdso_data->tk_mult = tk->tkr.mult;
- vdso_data->tk_shift = tk->tkr.shift;
+ vdso_data->tk_mult = tk->tkr_mono.mult;
+ vdso_data->tk_shift = tk->tkr_mono.shift;
smp_wmb();
++vdso_data->tb_update_count;
}
@@ -283,7 +283,7 @@ void __init time_init(void)
if (register_external_irq(EXT_IRQ_TIMING_ALERT, timing_alert_interrupt))
panic("Couldn't request external interrupt 0x1406");
- if (clocksource_register(&clocksource_tod) != 0)
+ if (__clocksource_register(&clocksource_tod) != 0)
panic("Could not register TOD clock source");
/* Enable TOD clock interrupts on the boot cpu. */
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index b93bed76ea94..14da43b801d9 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -7,14 +7,14 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
-#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
@@ -42,8 +42,8 @@ static DEFINE_SPINLOCK(topology_lock);
static struct mask_info socket_info;
static struct mask_info book_info;
-struct cpu_topology_s390 cpu_topology[NR_CPUS];
-EXPORT_SYMBOL_GPL(cpu_topology);
+DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
@@ -59,32 +59,50 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
return mask;
}
-static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
+static cpumask_t cpu_thread_map(unsigned int cpu)
+{
+ cpumask_t mask;
+ int i;
+
+ cpumask_copy(&mask, cpumask_of(cpu));
+ if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
+ return mask;
+ cpu -= cpu % (smp_cpu_mtid + 1);
+ for (i = 0; i <= smp_cpu_mtid; i++)
+ if (cpu_present(cpu + i))
+ cpumask_set_cpu(cpu + i, &mask);
+ return mask;
+}
+
+static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
struct mask_info *book,
struct mask_info *socket,
int one_socket_per_cpu)
{
- unsigned int cpu;
+ unsigned int core;
- for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) {
- unsigned int rcpu;
- int lcpu;
+ for_each_set_bit(core, &tl_core->mask[0], TOPOLOGY_CORE_BITS) {
+ unsigned int rcore;
+ int lcpu, i;
- rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
- lcpu = smp_find_processor_id(rcpu);
+ rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
+ lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
if (lcpu < 0)
continue;
- cpumask_set_cpu(lcpu, &book->mask);
- cpu_topology[lcpu].book_id = book->id;
- cpumask_set_cpu(lcpu, &socket->mask);
- cpu_topology[lcpu].core_id = rcpu;
- if (one_socket_per_cpu) {
- cpu_topology[lcpu].socket_id = rcpu;
- socket = socket->next;
- } else {
- cpu_topology[lcpu].socket_id = socket->id;
+ for (i = 0; i <= smp_cpu_mtid; i++) {
+ per_cpu(cpu_topology, lcpu + i).book_id = book->id;
+ per_cpu(cpu_topology, lcpu + i).core_id = rcore;
+ per_cpu(cpu_topology, lcpu + i).thread_id = lcpu + i;
+ cpumask_set_cpu(lcpu + i, &book->mask);
+ cpumask_set_cpu(lcpu + i, &socket->mask);
+ if (one_socket_per_cpu)
+ per_cpu(cpu_topology, lcpu + i).socket_id = rcore;
+ else
+ per_cpu(cpu_topology, lcpu + i).socket_id = socket->id;
+ smp_cpu_set_polarization(lcpu + i, tl_core->pp);
}
- smp_cpu_set_polarization(lcpu, tl_cpu->pp);
+ if (one_socket_per_cpu)
+ socket = socket->next;
}
return socket;
}
@@ -108,7 +126,7 @@ static void clear_masks(void)
static union topology_entry *next_tle(union topology_entry *tle)
{
if (!tle->nl)
- return (union topology_entry *)((struct topology_cpu *)tle + 1);
+ return (union topology_entry *)((struct topology_core *)tle + 1);
return (union topology_entry *)((struct topology_container *)tle + 1);
}
@@ -231,12 +249,14 @@ static void update_cpu_masks(void)
spin_lock_irqsave(&topology_lock, flags);
for_each_possible_cpu(cpu) {
- cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
- cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
+ per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu);
+ per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu);
+ per_cpu(cpu_topology, cpu).book_mask = cpu_group_map(&book_info, cpu);
if (!MACHINE_HAS_TOPOLOGY) {
- cpu_topology[cpu].core_id = cpu;
- cpu_topology[cpu].socket_id = cpu;
- cpu_topology[cpu].book_id = cpu;
+ per_cpu(cpu_topology, cpu).thread_id = cpu;
+ per_cpu(cpu_topology, cpu).core_id = cpu;
+ per_cpu(cpu_topology, cpu).socket_id = cpu;
+ per_cpu(cpu_topology, cpu).book_id = cpu;
}
}
spin_unlock_irqrestore(&topology_lock, flags);
@@ -314,50 +334,6 @@ void topology_expect_change(void)
set_topology_timer();
}
-static int __init early_parse_topology(char *p)
-{
- if (strncmp(p, "off", 3))
- return 0;
- topology_enabled = 0;
- return 0;
-}
-early_param("topology", early_parse_topology);
-
-static void __init alloc_masks(struct sysinfo_15_1_x *info,
- struct mask_info *mask, int offset)
-{
- int i, nr_masks;
-
- nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
- for (i = 0; i < info->mnest - offset; i++)
- nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
- nr_masks = max(nr_masks, 1);
- for (i = 0; i < nr_masks; i++) {
- mask->next = alloc_bootmem_align(
- roundup_pow_of_two(sizeof(struct mask_info)),
- roundup_pow_of_two(sizeof(struct mask_info)));
- mask = mask->next;
- }
-}
-
-void __init s390_init_cpu_topology(void)
-{
- struct sysinfo_15_1_x *info;
- int i;
-
- if (!MACHINE_HAS_TOPOLOGY)
- return;
- tl_info = alloc_bootmem_pages(PAGE_SIZE);
- info = tl_info;
- store_topology(info);
- pr_info("The CPU configuration topology of the machine is:");
- for (i = 0; i < TOPOLOGY_NR_MAG; i++)
- printk(KERN_CONT " %d", info->mag[i]);
- printk(KERN_CONT " / %d\n", info->mnest);
- alloc_masks(info, &socket_info, 1);
- alloc_masks(info, &book_info, 2);
-}
-
static int cpu_management;
static ssize_t dispatching_show(struct device *dev,
@@ -445,23 +421,75 @@ int topology_cpu_init(struct cpu *cpu)
return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}
+const struct cpumask *cpu_thread_mask(int cpu)
+{
+ return &per_cpu(cpu_topology, cpu).thread_mask;
+}
+
+
const struct cpumask *cpu_coregroup_mask(int cpu)
{
- return &cpu_topology[cpu].core_mask;
+ return &per_cpu(cpu_topology, cpu).core_mask;
}
static const struct cpumask *cpu_book_mask(int cpu)
{
- return &cpu_topology[cpu].book_mask;
+ return &per_cpu(cpu_topology, cpu).book_mask;
+}
+
+static int __init early_parse_topology(char *p)
+{
+ if (strncmp(p, "off", 3))
+ return 0;
+ topology_enabled = 0;
+ return 0;
}
+early_param("topology", early_parse_topology);
static struct sched_domain_topology_level s390_topology[] = {
+ { cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
{ cpu_book_mask, SD_INIT_NAME(BOOK) },
{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
{ NULL, },
};
+static void __init alloc_masks(struct sysinfo_15_1_x *info,
+ struct mask_info *mask, int offset)
+{
+ int i, nr_masks;
+
+ nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
+ for (i = 0; i < info->mnest - offset; i++)
+ nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
+ nr_masks = max(nr_masks, 1);
+ for (i = 0; i < nr_masks; i++) {
+ mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
+ mask = mask->next;
+ }
+}
+
+static int __init s390_topology_init(void)
+{
+ struct sysinfo_15_1_x *info;
+ int i;
+
+ if (!MACHINE_HAS_TOPOLOGY)
+ return 0;
+ tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
+ info = tl_info;
+ store_topology(info);
+ pr_info("The CPU configuration topology of the machine is:");
+ for (i = 0; i < TOPOLOGY_NR_MAG; i++)
+ printk(KERN_CONT " %d", info->mag[i]);
+ printk(KERN_CONT " / %d\n", info->mnest);
+ alloc_masks(info, &socket_info, 1);
+ alloc_masks(info, &book_info, 2);
+ set_sched_topology(s390_topology);
+ return 0;
+}
+early_initcall(s390_topology_init);
+
static int __init topology_init(void)
{
if (MACHINE_HAS_TOPOLOGY)
@@ -471,10 +499,3 @@ static int __init topology_init(void)
return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);
-
-static int __init early_topology_init(void)
-{
- set_sched_topology(s390_topology);
- return 0;
-}
-early_initcall(early_topology_init);
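For the thread mask built by cpu_thread_map() above, a small worked example with assumed values (two threads per core): the logical CPU is rounded down to its core base and all present siblings are added.

        unsigned int cpu = 11, mtid = 1;
        unsigned int base = cpu - (cpu % (mtid + 1));   /* = 10                  */
        /* resulting thread_mask: { 10, 11 } if both CPUs are present            */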
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 7699e735ae28..61541fb93dc6 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -25,9 +25,7 @@ __kernel_clock_gettime:
je 4f
cghi %r2,__CLOCK_REALTIME
je 5f
- cghi %r2,__CLOCK_THREAD_CPUTIME_ID
- je 9f
- cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
+ cghi %r2,-3 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
je 9f
cghi %r2,__CLOCK_MONOTONIC_COARSE
je 3f
@@ -106,7 +104,7 @@ __kernel_clock_gettime:
aghi %r15,16
br %r14
- /* CLOCK_THREAD_CPUTIME_ID for this thread */
+ /* CPUCLOCK_VIRT for this thread */
9: icm %r0,15,__VDSO_ECTG_OK(%r5)
jz 12f
ear %r2,%a4
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index e34122e539a1..e53d3595a7c8 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -15,6 +15,8 @@
#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/vtime.h>
+#include <asm/cpu_mf.h>
+#include <asm/smp.h>
static void virt_timer_expire(void);
@@ -23,6 +25,10 @@ static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;
+static DEFINE_PER_CPU(u64, mt_cycles[32]);
+static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
+static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
+
static inline u64 get_vtimer(void)
{
u64 timer;
@@ -61,6 +67,8 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
struct thread_info *ti = task_thread_info(tsk);
u64 timer, clock, user, system, steal;
+ u64 user_scaled, system_scaled;
+ int i;
timer = S390_lowcore.last_update_timer;
clock = S390_lowcore.last_update_clock;
@@ -76,15 +84,49 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
+ /* Do MT utilization calculation */
+ if (smp_cpu_mtid) {
+ u64 cycles_new[32], *cycles_old;
+ u64 delta, mult, div;
+
+ cycles_old = this_cpu_ptr(mt_cycles);
+ if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
+ mult = div = 0;
+ for (i = 0; i <= smp_cpu_mtid; i++) {
+ delta = cycles_new[i] - cycles_old[i];
+ mult += delta;
+ div += (i + 1) * delta;
+ }
+ if (mult > 0) {
+ /* Update scaling factor */
+ __this_cpu_write(mt_scaling_mult, mult);
+ __this_cpu_write(mt_scaling_div, div);
+ memcpy(cycles_old, cycles_new,
+ sizeof(u64) * (smp_cpu_mtid + 1));
+ }
+ }
+ }
+
user = S390_lowcore.user_timer - ti->user_timer;
S390_lowcore.steal_timer -= user;
ti->user_timer = S390_lowcore.user_timer;
- account_user_time(tsk, user, user);
system = S390_lowcore.system_timer - ti->system_timer;
S390_lowcore.steal_timer -= system;
ti->system_timer = S390_lowcore.system_timer;
- account_system_time(tsk, hardirq_offset, system, system);
+
+ user_scaled = user;
+ system_scaled = system;
+ /* Do MT utilization scaling */
+ if (smp_cpu_mtid) {
+ u64 mult = __this_cpu_read(mt_scaling_mult);
+ u64 div = __this_cpu_read(mt_scaling_div);
+
+ user_scaled = (user_scaled * mult) / div;
+ system_scaled = (system_scaled * mult) / div;
+ }
+ account_user_time(tsk, user, user_scaled);
+ account_system_time(tsk, hardirq_offset, system, system_scaled);
steal = S390_lowcore.steal_timer;
if ((s64) steal > 0) {
@@ -126,7 +168,7 @@ void vtime_account_user(struct task_struct *tsk)
void vtime_account_irq_enter(struct task_struct *tsk)
{
struct thread_info *ti = task_thread_info(tsk);
- u64 timer, system;
+ u64 timer, system, system_scaled;
timer = S390_lowcore.last_update_timer;
S390_lowcore.last_update_timer = get_vtimer();
@@ -135,7 +177,15 @@ void vtime_account_irq_enter(struct task_struct *tsk)
system = S390_lowcore.system_timer - ti->system_timer;
S390_lowcore.steal_timer -= system;
ti->system_timer = S390_lowcore.system_timer;
- account_system_time(tsk, 0, system, system);
+ system_scaled = system;
+ /* Do MT utilization scaling */
+ if (smp_cpu_mtid) {
+ u64 mult = __this_cpu_read(mt_scaling_mult);
+ u64 div = __this_cpu_read(mt_scaling_div);
+
+ system_scaled = (system_scaled * mult) / div;
+ }
+ account_system_time(tsk, 0, system, system_scaled);
virt_timer_forward(system);
}
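A worked example of the MT scaling introduced above: mult is the sum of the per-thread cycle deltas and div weights each delta by its thread index plus one, so raw timer values are scaled by mult/div. If only thread 0 ran, mult == div and nothing changes; if both threads of an SMT-2 core were equally busy, mult == 2*delta and div == 3*delta, so reported CPU time shrinks to 2/3 of the raw value. A minimal sketch with invented cycle counts:

#include <stdio.h>

int main(void)
{
	/* hypothetical cycle counters for an SMT-2 core, old vs. new sample */
	unsigned long long old[2] = { 1000, 1000 };
	unsigned long long new[2] = { 2000, 2000 };
	unsigned long long mult = 0, div = 0, raw = 900, i;

	for (i = 0; i < 2; i++) {
		unsigned long long delta = new[i] - old[i];

		mult += delta;			/* 2000 */
		div  += (i + 1) * delta;	/* 1*1000 + 2*1000 = 3000 */
	}
	/* raw CPU-timer time scaled by core utilization: 900 * 2/3 = 600 */
	printf("scaled = %llu\n", raw * mult / div);
	return 0;
}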
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 9254afff250c..fc7ec95848c3 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -77,7 +77,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
if (vcpu->run->s.regs.gprs[rx] & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
+ rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
@@ -213,7 +213,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
* - gpr 3 contains the virtqueue index (passed as datamatch)
* - gpr 4 contains the index on the bus (optionally)
*/
- ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS,
+ ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
vcpu->run->s.regs.gprs[2] & 0xffffffff,
8, &vcpu->run->s.regs.gprs[3],
vcpu->run->s.regs.gprs[4]);
@@ -230,7 +230,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
- int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff;
+ int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 8a1be9017730..a7559f7207df 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -10,6 +10,7 @@
#include <asm/pgtable.h>
#include "kvm-s390.h"
#include "gaccess.h"
+#include <asm/switch_to.h>
union asce {
unsigned long val;
@@ -207,6 +208,54 @@ union raddress {
unsigned long pfra : 52; /* Page-Frame Real Address */
};
+union alet {
+ u32 val;
+ struct {
+ u32 reserved : 7;
+ u32 p : 1;
+ u32 alesn : 8;
+ u32 alen : 16;
+ };
+};
+
+union ald {
+ u32 val;
+ struct {
+ u32 : 1;
+ u32 alo : 24;
+ u32 all : 7;
+ };
+};
+
+struct ale {
+ unsigned long i : 1; /* ALEN-Invalid Bit */
+ unsigned long : 5;
+ unsigned long fo : 1; /* Fetch-Only Bit */
+ unsigned long p : 1; /* Private Bit */
+ unsigned long alesn : 8; /* Access-List-Entry Sequence Number */
+ unsigned long aleax : 16; /* Access-List-Entry Authorization Index */
+ unsigned long : 32;
+ unsigned long : 1;
+ unsigned long asteo : 25; /* ASN-Second-Table-Entry Origin */
+ unsigned long : 6;
+ unsigned long astesn : 32; /* ASTE Sequence Number */
+} __packed;
+
+struct aste {
+ unsigned long i : 1; /* ASX-Invalid Bit */
+ unsigned long ato : 29; /* Authority-Table Origin */
+ unsigned long : 1;
+ unsigned long b : 1; /* Base-Space Bit */
+ unsigned long ax : 16; /* Authorization Index */
+ unsigned long atl : 12; /* Authority-Table Length */
+ unsigned long : 2;
+ unsigned long ca : 1; /* Controlled-ASN Bit */
+ unsigned long ra : 1; /* Reusable-ASN Bit */
+ unsigned long asce : 64; /* Address-Space-Control Element */
+ unsigned long ald : 32;
+ unsigned long astesn : 32;
+ /* .. more fields follow */

+} __packed;
int ipte_lock_held(struct kvm_vcpu *vcpu)
{
@@ -307,15 +356,157 @@ void ipte_unlock(struct kvm_vcpu *vcpu)
ipte_unlock_simple(vcpu);
}
-static unsigned long get_vcpu_asce(struct kvm_vcpu *vcpu)
+static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
+ int write)
+{
+ union alet alet;
+ struct ale ale;
+ struct aste aste;
+ unsigned long ald_addr, authority_table_addr;
+ union ald ald;
+ int eax, rc;
+ u8 authority_table;
+
+ if (ar >= NUM_ACRS)
+ return -EINVAL;
+
+ save_access_regs(vcpu->run->s.regs.acrs);
+ alet.val = vcpu->run->s.regs.acrs[ar];
+
+ if (ar == 0 || alet.val == 0) {
+ asce->val = vcpu->arch.sie_block->gcr[1];
+ return 0;
+ } else if (alet.val == 1) {
+ asce->val = vcpu->arch.sie_block->gcr[7];
+ return 0;
+ }
+
+ if (alet.reserved)
+ return PGM_ALET_SPECIFICATION;
+
+ if (alet.p)
+ ald_addr = vcpu->arch.sie_block->gcr[5];
+ else
+ ald_addr = vcpu->arch.sie_block->gcr[2];
+ ald_addr &= 0x7fffffc0;
+
+ rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
+ if (rc)
+ return rc;
+
+ if (alet.alen / 8 > ald.all)
+ return PGM_ALEN_TRANSLATION;
+
+ if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
+ return PGM_ADDRESSING;
+
+ rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
+ sizeof(struct ale));
+ if (rc)
+ return rc;
+
+ if (ale.i == 1)
+ return PGM_ALEN_TRANSLATION;
+ if (ale.alesn != alet.alesn)
+ return PGM_ALE_SEQUENCE;
+
+ rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
+ if (rc)
+ return rc;
+
+ if (aste.i)
+ return PGM_ASTE_VALIDITY;
+ if (aste.astesn != ale.astesn)
+ return PGM_ASTE_SEQUENCE;
+
+ if (ale.p == 1) {
+ eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
+ if (ale.aleax != eax) {
+ if (eax / 16 > aste.atl)
+ return PGM_EXTENDED_AUTHORITY;
+
+ authority_table_addr = aste.ato * 4 + eax / 4;
+
+ rc = read_guest_real(vcpu, authority_table_addr,
+ &authority_table,
+ sizeof(u8));
+ if (rc)
+ return rc;
+
+ if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
+ return PGM_EXTENDED_AUTHORITY;
+ }
+ }
+
+ if (ale.fo == 1 && write)
+ return PGM_PROTECTION;
+
+ asce->val = aste.asce;
+ return 0;
+}
+
+struct trans_exc_code_bits {
+ unsigned long addr : 52; /* Translation-exception Address */
+ unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */
+ unsigned long : 6;
+ unsigned long b60 : 1;
+ unsigned long b61 : 1;
+ unsigned long as : 2; /* ASCE Identifier */
+};
+
+enum {
+ FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
+ FSI_STORE = 1, /* Exception was due to store operation */
+ FSI_FETCH = 2 /* Exception was due to fetch operation */
+};
+
+static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
+ ar_t ar, int write)
{
+ int rc;
+ psw_t *psw = &vcpu->arch.sie_block->gpsw;
+ struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
+ struct trans_exc_code_bits *tec_bits;
+
+ memset(pgm, 0, sizeof(*pgm));
+ tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
+ tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
+ tec_bits->as = psw_bits(*psw).as;
+
+ if (!psw_bits(*psw).t) {
+ asce->val = 0;
+ asce->r = 1;
+ return 0;
+ }
+
switch (psw_bits(vcpu->arch.sie_block->gpsw).as) {
case PSW_AS_PRIMARY:
- return vcpu->arch.sie_block->gcr[1];
+ asce->val = vcpu->arch.sie_block->gcr[1];
+ return 0;
case PSW_AS_SECONDARY:
- return vcpu->arch.sie_block->gcr[7];
+ asce->val = vcpu->arch.sie_block->gcr[7];
+ return 0;
case PSW_AS_HOME:
- return vcpu->arch.sie_block->gcr[13];
+ asce->val = vcpu->arch.sie_block->gcr[13];
+ return 0;
+ case PSW_AS_ACCREG:
+ rc = ar_translation(vcpu, asce, ar, write);
+ switch (rc) {
+ case PGM_ALEN_TRANSLATION:
+ case PGM_ALE_SEQUENCE:
+ case PGM_ASTE_VALIDITY:
+ case PGM_ASTE_SEQUENCE:
+ case PGM_EXTENDED_AUTHORITY:
+ vcpu->arch.pgm.exc_access_id = ar;
+ break;
+ case PGM_PROTECTION:
+ tec_bits->b60 = 1;
+ tec_bits->b61 = 1;
+ break;
+ }
+ if (rc > 0)
+ pgm->code = rc;
+ return rc;
}
return 0;
}
@@ -330,10 +521,11 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
* @vcpu: virtual cpu
* @gva: guest virtual address
* @gpa: points to where guest physical (absolute) address should be stored
+ * @asce: effective asce
* @write: indicates if access is a write access
*
* Translate a guest virtual address into a guest absolute address by means
- * of dynamic address translation as specified by the architecuture.
+ * of dynamic address translation as specified by the architecture.
* If the resulting absolute address is not available in the configuration
* an addressing exception is indicated and @gpa will not be changed.
*
@@ -345,7 +537,8 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
* by the architecture
*/
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
- unsigned long *gpa, int write)
+ unsigned long *gpa, const union asce asce,
+ int write)
{
union vaddress vaddr = {.addr = gva};
union raddress raddr = {.addr = gva};
@@ -354,12 +547,10 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
union ctlreg0 ctlreg0;
unsigned long ptr;
int edat1, edat2;
- union asce asce;
ctlreg0.val = vcpu->arch.sie_block->gcr[0];
- edat1 = ctlreg0.edat && test_vfacility(8);
- edat2 = edat1 && test_vfacility(78);
- asce.val = get_vcpu_asce(vcpu);
+ edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
+ edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
if (asce.r)
goto real_address;
ptr = asce.origin * 4096;
@@ -506,48 +697,30 @@ static inline int is_low_address(unsigned long ga)
return (ga & ~0x11fful) == 0;
}
-static int low_address_protection_enabled(struct kvm_vcpu *vcpu)
+static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
+ const union asce asce)
{
union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
psw_t *psw = &vcpu->arch.sie_block->gpsw;
- union asce asce;
if (!ctlreg0.lap)
return 0;
- asce.val = get_vcpu_asce(vcpu);
if (psw_bits(*psw).t && asce.p)
return 0;
return 1;
}
-struct trans_exc_code_bits {
- unsigned long addr : 52; /* Translation-exception Address */
- unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */
- unsigned long : 7;
- unsigned long b61 : 1;
- unsigned long as : 2; /* ASCE Identifier */
-};
-
-enum {
- FSI_UNKNOWN = 0, /* Unknown wether fetch or store */
- FSI_STORE = 1, /* Exception was due to store operation */
- FSI_FETCH = 2 /* Exception was due to fetch operation */
-};
-
static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
unsigned long *pages, unsigned long nr_pages,
- int write)
+ const union asce asce, int write)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
psw_t *psw = &vcpu->arch.sie_block->gpsw;
struct trans_exc_code_bits *tec_bits;
int lap_enabled, rc;
- memset(pgm, 0, sizeof(*pgm));
tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
- tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
- tec_bits->as = psw_bits(*psw).as;
- lap_enabled = low_address_protection_enabled(vcpu);
+ lap_enabled = low_address_protection_enabled(vcpu, asce);
while (nr_pages) {
ga = kvm_s390_logical_to_effective(vcpu, ga);
tec_bits->addr = ga >> PAGE_SHIFT;
@@ -557,7 +730,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
}
ga &= PAGE_MASK;
if (psw_bits(*psw).t) {
- rc = guest_translate(vcpu, ga, pages, write);
+ rc = guest_translate(vcpu, ga, pages, asce, write);
if (rc < 0)
return rc;
if (rc == PGM_PROTECTION)
@@ -578,7 +751,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
return 0;
}
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len, int write)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -591,20 +764,19 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
if (!len)
return 0;
- /* Access register mode is not supported yet. */
- if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
- return -EOPNOTSUPP;
+ rc = get_vcpu_asce(vcpu, &asce, ar, write);
+ if (rc)
+ return rc;
nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
pages = pages_array;
if (nr_pages > ARRAY_SIZE(pages_array))
pages = vmalloc(nr_pages * sizeof(unsigned long));
if (!pages)
return -ENOMEM;
- asce.val = get_vcpu_asce(vcpu);
need_ipte_lock = psw_bits(*psw).t && !asce.r;
if (need_ipte_lock)
ipte_lock(vcpu);
- rc = guest_page_range(vcpu, ga, pages, nr_pages, write);
+ rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write);
for (idx = 0; idx < nr_pages && !rc; idx++) {
gpa = *(pages + idx) + (ga & ~PAGE_MASK);
_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
@@ -652,7 +824,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* Note: The IPTE lock is not taken during this function, so the caller
* has to take care of this.
*/
-int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
+int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
unsigned long *gpa, int write)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
@@ -661,26 +833,21 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
union asce asce;
int rc;
- /* Access register mode is not supported yet. */
- if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
- return -EOPNOTSUPP;
-
gva = kvm_s390_logical_to_effective(vcpu, gva);
- memset(pgm, 0, sizeof(*pgm));
tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
- tec->as = psw_bits(*psw).as;
- tec->fsi = write ? FSI_STORE : FSI_FETCH;
+ rc = get_vcpu_asce(vcpu, &asce, ar, write);
tec->addr = gva >> PAGE_SHIFT;
- if (is_low_address(gva) && low_address_protection_enabled(vcpu)) {
+ if (rc)
+ return rc;
+ if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
if (write) {
rc = pgm->code = PGM_PROTECTION;
return rc;
}
}
- asce.val = get_vcpu_asce(vcpu);
if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */
- rc = guest_translate(vcpu, gva, gpa, write);
+ rc = guest_translate(vcpu, gva, gpa, asce, write);
if (rc > 0) {
if (rc == PGM_PROTECTION)
tec->b61 = 1;
@@ -697,28 +864,51 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
}
/**
- * kvm_s390_check_low_addr_protection - check for low-address protection
- * @ga: Guest address
+ * check_gva_range - test a range of guest virtual addresses for accessibility
+ */
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+ unsigned long length, int is_write)
+{
+ unsigned long gpa;
+ unsigned long currlen;
+ int rc = 0;
+
+ ipte_lock(vcpu);
+ while (length > 0 && !rc) {
+ currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
+ rc = guest_translate_address(vcpu, gva, ar, &gpa, is_write);
+ gva += currlen;
+ length -= currlen;
+ }
+ ipte_unlock(vcpu);
+
+ return rc;
+}
+
+/**
+ * kvm_s390_check_low_addr_prot_real - check for low-address protection
+ * @gra: Guest real address
*
 * Checks whether an address is subject to low-address protection and sets
* up vcpu->arch.pgm accordingly if necessary.
*
* Return: 0 if no protection exception, or PGM_PROTECTION if protected.
*/
-int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga)
+int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
psw_t *psw = &vcpu->arch.sie_block->gpsw;
struct trans_exc_code_bits *tec_bits;
+ union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
- if (!is_low_address(ga) || !low_address_protection_enabled(vcpu))
+ if (!ctlreg0.lap || !is_low_address(gra))
return 0;
memset(pgm, 0, sizeof(*pgm));
tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
tec_bits->fsi = FSI_STORE;
tec_bits->as = psw_bits(*psw).as;
- tec_bits->addr = ga >> PAGE_SHIFT;
+ tec_bits->addr = gra >> PAGE_SHIFT;
pgm->code = PGM_PROTECTION;
return pgm->code;
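For the extended-authority check in ar_translation() above: each byte of the guest authority table packs four 2-bit entries, so eax/4 selects the byte and (0x40 >> ((eax & 3) * 2)) isolates the tested bit of entry eax within it (presumably the secondary-authority bit, going by the access-register-translation description; the patch itself does not name it). A small stand-alone sketch of that bit arithmetic, with invented table contents:

#include <stdio.h>

/* Returns nonzero if the authority bit tested by ar_translation() is set
 * for extended authorization index eax.
 */
static int authority_bit_set(const unsigned char *table, unsigned int eax)
{
	unsigned char byte = table[eax / 4];	    /* four entries per byte */

	return byte & (0x40 >> ((eax & 3) * 2));    /* bit of entry eax % 4 */
}

int main(void)
{
	/* 0x50 = 0101 0000b: the tested bit is set for entries 0 and 1 only */
	unsigned char table[] = { 0x50 };

	printf("%d %d %d %d\n",
	       !!authority_bit_set(table, 0), !!authority_bit_set(table, 1),
	       !!authority_bit_set(table, 2), !!authority_bit_set(table, 3));
	return 0;
}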
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 0149cf15058a..ef03726cc661 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -156,9 +156,11 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
}
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
- unsigned long *gpa, int write);
+ ar_t ar, unsigned long *gpa, int write);
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+ unsigned long length, int is_write);
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len, int write);
int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
@@ -168,6 +170,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* write_guest - copy data from kernel space to guest space
* @vcpu: virtual cpu
* @ga: guest address
+ * @ar: access register
* @data: source address in kernel space
* @len: number of bytes to copy
*
@@ -176,8 +179,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* If DAT is off data will be copied to guest real or absolute memory.
* If DAT is on data will be copied to the address space as specified by
* the address space bits of the PSW:
- * Primary, secondory or home space (access register mode is currently not
- * implemented).
+ * Primary, secondary, home space or access register mode.
* The addressing mode of the PSW is also inspected, so that address wrap
* around is taken into account for 24-, 31- and 64-bit addressing mode,
* if the to be copied data crosses page boundaries in guest address space.
@@ -210,16 +212,17 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* if data has been changed in guest space in case of an exception.
*/
static inline __must_check
-int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len)
{
- return access_guest(vcpu, ga, data, len, 1);
+ return access_guest(vcpu, ga, ar, data, len, 1);
}
/**
* read_guest - copy data from guest space to kernel space
* @vcpu: virtual cpu
* @ga: guest address
+ * @ar: access register
* @data: destination address in kernel space
* @len: number of bytes to copy
*
@@ -229,10 +232,10 @@ int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
* data will be copied from guest space to kernel space.
*/
static inline __must_check
-int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len)
{
- return access_guest(vcpu, ga, data, len, 0);
+ return access_guest(vcpu, ga, ar, data, len, 0);
}
/**
@@ -330,6 +333,6 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
void ipte_lock(struct kvm_vcpu *vcpu);
void ipte_unlock(struct kvm_vcpu *vcpu);
int ipte_lock_held(struct kvm_vcpu *vcpu);
-int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga);
+int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
#endif /* __KVM_S390_GACCESS_H */
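With the ar_t parameter plumbed through, an instruction handler passes along the access register that formed the operand address, so access-register mode can now be resolved instead of rejected. A hedged usage sketch only: the kvm_s390_get_base_disp_s() out-parameter is assumed to mirror the get_base_disp_rs() change visible in diag.c above, and the handler itself is invented.

/* Illustrative priv.c-style handler, not part of this patch. */
static int handle_read_something(struct kvm_vcpu *vcpu)
{
	u64 operand, address;
	ar_t ar;
	int rc;

	address = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (address & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* resolves primary, secondary, home or, new, access-register mode */
	rc = read_guest(vcpu, address, ar, &operand, sizeof(operand));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	vcpu->run->s.regs.gprs[1] = operand;
	return 0;
}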
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index 3e8d4092ce30..e97b3455d7e6 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -191,8 +191,8 @@ static int __import_wp_info(struct kvm_vcpu *vcpu,
if (!wp_info->old_data)
return -ENOMEM;
/* try to backup the original value */
- ret = read_guest(vcpu, wp_info->phys_addr, wp_info->old_data,
- wp_info->len);
+ ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data,
+ wp_info->len);
if (ret) {
kfree(wp_info->old_data);
wp_info->old_data = NULL;
@@ -362,8 +362,8 @@ static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
continue;
/* refetch the wp data and compare it to the old value */
- if (!read_guest(vcpu, wp_info->phys_addr, temp,
- wp_info->len)) {
+ if (!read_guest_abs(vcpu, wp_info->phys_addr, temp,
+ wp_info->len)) {
if (memcmp(temp, wp_info->old_data, wp_info->len)) {
kfree(temp);
return wp_info;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 81c77ab8102e..9e3779e3e496 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -68,18 +68,27 @@ static int handle_noop(struct kvm_vcpu *vcpu)
static int handle_stop(struct kvm_vcpu *vcpu)
{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc = 0;
- unsigned int action_bits;
+ uint8_t flags, stop_pending;
vcpu->stat.exit_stop_request++;
- trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);
- action_bits = vcpu->arch.local_int.action_bits;
+ /* delay the stop if any non-stop irq is pending */
+ if (kvm_s390_vcpu_has_irq(vcpu, 1))
+ return 0;
+
+ /* avoid races with the injection/SIGP STOP code */
+ spin_lock(&li->lock);
+ flags = li->irq.stop.flags;
+ stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
+ spin_unlock(&li->lock);
- if (!(action_bits & ACTION_STOP_ON_STOP))
+ trace_kvm_s390_stop_request(stop_pending, flags);
+ if (!stop_pending)
return 0;
- if (action_bits & ACTION_STORE_ON_STOP) {
+ if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) {
rc = kvm_s390_vcpu_store_status(vcpu,
KVM_S390_STORE_STATUS_NOADDR);
if (rc)
@@ -156,6 +165,7 @@ static void __extract_prog_irq(struct kvm_vcpu *vcpu,
pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn;
pgm_info->mon_code = vcpu->arch.sie_block->tecmc;
break;
+ case PGM_VECTOR_PROCESSING:
case PGM_DATA:
pgm_info->data_exc_code = vcpu->arch.sie_block->dxc;
break;
@@ -279,11 +289,13 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
irq.type = KVM_S390_INT_CPU_TIMER;
break;
case EXT_IRQ_EXTERNAL_CALL:
- if (kvm_s390_si_ext_call_pending(vcpu))
- return 0;
irq.type = KVM_S390_INT_EXTERNAL_CALL;
irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
- break;
+ rc = kvm_s390_inject_vcpu(vcpu, &irq);
+ /* ignore if another external call is already pending */
+ if (rc == -EBUSY)
+ return 0;
+ return rc;
default:
return -EOPNOTSUPP;
}
@@ -307,17 +319,19 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
/* Make sure that the source is paged-in */
- srcaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg2]);
- if (kvm_is_error_gpa(vcpu->kvm, srcaddr))
- return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
+ reg2, &srcaddr, 0);
+ if (rc)
+ return kvm_s390_inject_prog_cond(vcpu, rc);
rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
if (rc != 0)
return rc;
/* Make sure that the destination is paged-in */
- dstaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg1]);
- if (kvm_is_error_gpa(vcpu->kvm, dstaddr))
- return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
+ reg1, &dstaddr, 1);
+ if (rc)
+ return kvm_s390_inject_prog_cond(vcpu, rc);
rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
if (rc != 0)
return rc;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index f00f31e66cd8..9de47265ef73 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1,7 +1,7 @@
/*
* handling kvm guest interrupts
*
- * Copyright IBM Corp. 2008,2014
+ * Copyright IBM Corp. 2008, 2015
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
@@ -17,8 +17,12 @@
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
+#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
+#include <asm/dis.h>
#include <asm/uaccess.h>
+#include <asm/sclp.h>
+#include <asm/isc.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"
@@ -31,11 +35,6 @@
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00
-static int is_ioint(u64 type)
-{
- return ((type & 0xfffe0000u) != 0xfffe0000u);
-}
-
int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
@@ -71,70 +70,45 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
return 1;
}
-static u64 int_word_to_isc_bits(u32 int_word)
+static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
- u8 isc = (int_word & 0x38000000) >> 27;
+ if (!(vcpu->arch.sie_block->ckc <
+ get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
+ return 0;
+ return ckc_interrupts_enabled(vcpu);
+}
+static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
+{
+ return !psw_extint_disabled(vcpu) &&
+ (vcpu->arch.sie_block->gcr[0] & 0x400ul);
+}
+
+static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
+{
+ return (vcpu->arch.sie_block->cputm >> 63) &&
+ cpu_timer_interrupts_enabled(vcpu);
+}
+
+static inline int is_ioirq(unsigned long irq_type)
+{
+ return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
+ (irq_type <= IRQ_PEND_IO_ISC_7));
+}
+
+static uint64_t isc_to_isc_bits(int isc)
+{
return (0x80 >> isc) << 24;
}
-static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+static inline u8 int_word_to_isc(u32 int_word)
{
- switch (inti->type) {
- case KVM_S390_INT_EXTERNAL_CALL:
- if (psw_extint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
- return 1;
- return 0;
- case KVM_S390_INT_EMERGENCY:
- if (psw_extint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
- return 1;
- return 0;
- case KVM_S390_INT_CLOCK_COMP:
- return ckc_interrupts_enabled(vcpu);
- case KVM_S390_INT_CPU_TIMER:
- if (psw_extint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
- return 1;
- return 0;
- case KVM_S390_INT_SERVICE:
- case KVM_S390_INT_PFAULT_INIT:
- case KVM_S390_INT_PFAULT_DONE:
- case KVM_S390_INT_VIRTIO:
- if (psw_extint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
- return 1;
- return 0;
- case KVM_S390_PROGRAM_INT:
- case KVM_S390_SIGP_STOP:
- case KVM_S390_SIGP_SET_PREFIX:
- case KVM_S390_RESTART:
- return 1;
- case KVM_S390_MCHK:
- if (psw_mchk_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
- return 1;
- return 0;
- case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- if (psw_ioint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[6] &
- int_word_to_isc_bits(inti->io.io_int_word))
- return 1;
- return 0;
- default:
- printk(KERN_WARNING "illegal interrupt type %llx\n",
- inti->type);
- BUG();
- }
- return 0;
+ return (int_word & 0x38000000) >> 27;
+}
+
+static inline unsigned long pending_floating_irqs(struct kvm_vcpu *vcpu)
+{
+ return vcpu->kvm->arch.float_int.pending_irqs;
}
static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
@@ -142,12 +116,31 @@ static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
return vcpu->arch.local_int.pending_irqs;
}
-static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
+static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
+ unsigned long active_mask)
{
- unsigned long active_mask = pending_local_irqs(vcpu);
+ int i;
+
+ for (i = 0; i <= MAX_ISC; i++)
+ if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
+ active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));
+
+ return active_mask;
+}
+
+static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
+{
+ unsigned long active_mask;
+
+ active_mask = pending_local_irqs(vcpu);
+ active_mask |= pending_floating_irqs(vcpu);
if (psw_extint_disabled(vcpu))
active_mask &= ~IRQ_PEND_EXT_MASK;
+ if (psw_ioint_disabled(vcpu))
+ active_mask &= ~IRQ_PEND_IO_MASK;
+ else
+ active_mask = disable_iscs(vcpu, active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
@@ -156,8 +149,19 @@ static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
+ if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
+ __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
if (psw_mchk_disabled(vcpu))
active_mask &= ~IRQ_PEND_MCHK_MASK;
+ if (!(vcpu->arch.sie_block->gcr[14] &
+ vcpu->kvm->arch.float_int.mchk.cr14))
+ __clear_bit(IRQ_PEND_MCHK_REP, &active_mask);
+
+ /*
+ * STOP irqs will never be actively delivered. They are triggered via
+ * intercept requests and cleared when the stop intercept is performed.
+ */
+ __clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);
return active_mask;
}
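To make the ISC filtering concrete: int_word_to_isc() extracts the 3-bit interruption subclass from bits 2-4 of the I/O interruption word, and isc_to_isc_bits() turns it into the matching subclass-mask bit of CR6, so ISC 0 maps to 0x80000000 and ISC 7 to 0x01000000. A tiny stand-alone check (interruption word invented):

#include <stdio.h>
#include <stdint.h>

static uint8_t int_word_to_isc(uint32_t int_word)
{
	return (int_word & 0x38000000) >> 27;
}

static uint64_t isc_to_isc_bits(int isc)
{
	return (0x80 >> isc) << 24;
}

int main(void)
{
	uint32_t int_word = 0x18000000;		/* ISC 3, rest invented */
	uint8_t isc = int_word_to_isc(int_word);

	/* gcr[6] must have 0x10000000 set for this interrupt to be enabled */
	printf("isc=%u cr6 bit=%#llx\n", isc,
	       (unsigned long long)isc_to_isc_bits(isc));
	return 0;
}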
@@ -186,9 +190,6 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
LCTL_CR10 | LCTL_CR11);
vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
}
-
- if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP)
- atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
}
static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
@@ -196,6 +197,16 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}
+static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
+{
+ if (!(pending_floating_irqs(vcpu) & IRQ_PEND_IO_MASK))
+ return;
+ else if (psw_ioint_disabled(vcpu))
+ __set_cpuflag(vcpu, CPUSTAT_IO_INT);
+ else
+ vcpu->arch.sie_block->lctl |= LCTL_CR6;
+}
+
static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
@@ -216,46 +227,23 @@ static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->lctl |= LCTL_CR14;
}
-/* Set interception request for non-deliverable local interrupts */
-static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
+static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
- set_intercept_indicators_ext(vcpu);
- set_intercept_indicators_mchk(vcpu);
+ if (kvm_s390_is_stop_irq_pending(vcpu))
+ __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}
-static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+/* Set interception request for non-deliverable interrupts */
+static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
- switch (inti->type) {
- case KVM_S390_INT_SERVICE:
- case KVM_S390_INT_PFAULT_DONE:
- case KVM_S390_INT_VIRTIO:
- if (psw_extint_disabled(vcpu))
- __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
- else
- vcpu->arch.sie_block->lctl |= LCTL_CR0;
- break;
- case KVM_S390_MCHK:
- if (psw_mchk_disabled(vcpu))
- vcpu->arch.sie_block->ictl |= ICTL_LPSW;
- else
- vcpu->arch.sie_block->lctl |= LCTL_CR14;
- break;
- case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- if (psw_ioint_disabled(vcpu))
- __set_cpuflag(vcpu, CPUSTAT_IO_INT);
- else
- vcpu->arch.sie_block->lctl |= LCTL_CR6;
- break;
- default:
- BUG();
- }
+ set_intercept_indicators_io(vcpu);
+ set_intercept_indicators_ext(vcpu);
+ set_intercept_indicators_mchk(vcpu);
+ set_intercept_indicators_stop(vcpu);
}
static u16 get_ilc(struct kvm_vcpu *vcpu)
{
- const unsigned short table[] = { 2, 4, 4, 6 };
-
switch (vcpu->arch.sie_block->icptcode) {
case ICPT_INST:
case ICPT_INSTPROGI:
@@ -263,7 +251,7 @@ static u16 get_ilc(struct kvm_vcpu *vcpu)
case ICPT_PARTEXEC:
case ICPT_IOINST:
/* last instruction only stored for these icptcodes */
- return table[vcpu->arch.sie_block->ipa >> 14];
+ return insn_length(vcpu->arch.sie_block->ipa >> 8);
case ICPT_PROGI:
return vcpu->arch.sie_block->pgmilc;
default:
@@ -339,38 +327,72 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
+ struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- struct kvm_s390_mchk_info mchk;
- int rc;
+ struct kvm_s390_mchk_info mchk = {};
+ unsigned long adtl_status_addr;
+ int deliver = 0;
+ int rc = 0;
+ spin_lock(&fi->lock);
spin_lock(&li->lock);
- mchk = li->irq.mchk;
+ if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
+ test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
+ /*
+ * If there was an exigent machine check pending, then any
+ * repressible machine checks that might have been pending
+ * are indicated along with it, so always clear bits for
+ * repressible and exigent interrupts
+ */
+ mchk = li->irq.mchk;
+ clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
+ clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
+ memset(&li->irq.mchk, 0, sizeof(mchk));
+ deliver = 1;
+ }
/*
- * If there was an exigent machine check pending, then any repressible
- * machine checks that might have been pending are indicated along
- * with it, so always clear both bits
+ * We indicate floating repressible conditions along with
+ * other pending conditions. Channel Report Pending and Channel
+ * Subsystem damage are the only two and are indicated by
+ * bits in mcic and masked in cr14.
*/
- clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
- clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
- memset(&li->irq.mchk, 0, sizeof(mchk));
+ if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
+ mchk.mcic |= fi->mchk.mcic;
+ mchk.cr14 |= fi->mchk.cr14;
+ memset(&fi->mchk, 0, sizeof(mchk));
+ deliver = 1;
+ }
spin_unlock(&li->lock);
+ spin_unlock(&fi->lock);
- VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
- mchk.mcic);
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
- mchk.cr14, mchk.mcic);
-
- rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
- rc |= put_guest_lc(vcpu, mchk.mcic,
- (u64 __user *) __LC_MCCK_CODE);
- rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
- (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
- rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
- &mchk.fixed_logout, sizeof(mchk.fixed_logout));
- rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (deliver) {
+ VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+ mchk.mcic);
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ KVM_S390_MCHK,
+ mchk.cr14, mchk.mcic);
+
+ rc = kvm_s390_vcpu_store_status(vcpu,
+ KVM_S390_STORE_STATUS_PREFIXED);
+ rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
+ &adtl_status_addr,
+ sizeof(unsigned long));
+ rc |= kvm_s390_vcpu_store_adtl_status(vcpu,
+ adtl_status_addr);
+ rc |= put_guest_lc(vcpu, mchk.mcic,
+ (u64 __user *) __LC_MCCK_CODE);
+ rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
+ (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
+ &mchk.fixed_logout,
+ sizeof(mchk.fixed_logout));
+ rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ }
return rc ? -EFAULT : 0;
}
@@ -392,18 +414,6 @@ static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
return rc ? -EFAULT : 0;
}
-static int __must_check __deliver_stop(struct kvm_vcpu *vcpu)
-{
- VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
- vcpu->stat.deliver_stop_signal++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_STOP,
- 0, 0);
-
- __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
- clear_bit(IRQ_PEND_SIGP_STOP, &vcpu->arch.local_int.pending_irqs);
- return 0;
-}
-
static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -485,7 +495,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_pgm_info pgm_info;
- int rc = 0;
+ int rc = 0, nullifying = false;
u16 ilc = get_ilc(vcpu);
spin_lock(&li->lock);
@@ -510,6 +520,8 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
case PGM_LX_TRANSLATION:
case PGM_PRIMARY_AUTHORITY:
case PGM_SECONDARY_AUTHORITY:
+ nullifying = true;
+ /* fall through */
case PGM_SPACE_SWITCH:
rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
(u64 *)__LC_TRANS_EXC_CODE);
@@ -522,6 +534,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
case PGM_EXTENDED_AUTHORITY:
rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
(u8 *)__LC_EXC_ACCESS_ID);
+ nullifying = true;
break;
case PGM_ASCE_TYPE:
case PGM_PAGE_TRANSLATION:
@@ -535,6 +548,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
(u8 *)__LC_EXC_ACCESS_ID);
rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
(u8 *)__LC_OP_ACCESS_ID);
+ nullifying = true;
break;
case PGM_MONITOR:
rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
@@ -542,6 +556,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
rc |= put_guest_lc(vcpu, pgm_info.mon_code,
(u64 *)__LC_MON_CODE);
break;
+ case PGM_VECTOR_PROCESSING:
case PGM_DATA:
rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
(u32 *)__LC_DATA_EXC_CODE);
@@ -552,6 +567,15 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
(u8 *)__LC_EXC_ACCESS_ID);
break;
+ case PGM_STACK_FULL:
+ case PGM_STACK_EMPTY:
+ case PGM_STACK_SPECIFICATION:
+ case PGM_STACK_TYPE:
+ case PGM_STACK_OPERATION:
+ case PGM_TRACE_TABEL:
+ case PGM_CRYPTO_OPERATION:
+ nullifying = true;
+ break;
}
if (pgm_info.code & PGM_PER) {
@@ -565,7 +589,12 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
(u8 *) __LC_PER_ACCESS_ID);
}
+ if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
+ kvm_s390_rewind_psw(vcpu, ilc);
+
rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
+ rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
+ (u64 *) __LC_LAST_BREAK);
rc |= put_guest_lc(vcpu, pgm_info.code,
(u16 *)__LC_PGM_INT_CODE);
rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
@@ -575,16 +604,27 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
return rc ? -EFAULT : 0;
}
-static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
- int rc;
+ struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct kvm_s390_ext_info ext;
+ int rc = 0;
+
+ spin_lock(&fi->lock);
+ if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
+ spin_unlock(&fi->lock);
+ return 0;
+ }
+ ext = fi->srv_signal;
+ memset(&fi->srv_signal, 0, sizeof(ext));
+ clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
- inti->ext.ext_params);
+ ext.ext_params);
vcpu->stat.deliver_service_signal++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->ext.ext_params, 0);
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
+ ext.ext_params, 0);
rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
@@ -592,106 +632,146 @@ static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params,
+ rc |= put_guest_lc(vcpu, ext.ext_params,
(u32 *)__LC_EXT_PARAMS);
+
return rc ? -EFAULT : 0;
}
-static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
- int rc;
+ struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct kvm_s390_interrupt_info *inti;
+ int rc = 0;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
- KVM_S390_INT_PFAULT_DONE, 0,
- inti->ext.ext_params2);
+ spin_lock(&fi->lock);
+ inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
+ struct kvm_s390_interrupt_info,
+ list);
+ if (inti) {
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ KVM_S390_INT_PFAULT_DONE, 0,
+ inti->ext.ext_params2);
+ list_del(&inti->list);
+ fi->counters[FIRQ_CNTR_PFAULT] -= 1;
+ }
+ if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
+ clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
- rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
- (u64 *)__LC_EXT_PARAMS2);
+ if (inti) {
+ rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
+ (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, PFAULT_DONE,
+ (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
+ (u64 *)__LC_EXT_PARAMS2);
+ kfree(inti);
+ }
return rc ? -EFAULT : 0;
}
-static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
- int rc;
+ struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct kvm_s390_interrupt_info *inti;
+ int rc = 0;
- VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
- inti->ext.ext_params, inti->ext.ext_params2);
- vcpu->stat.deliver_virtio_interrupt++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->ext.ext_params,
- inti->ext.ext_params2);
+ spin_lock(&fi->lock);
+ inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
+ struct kvm_s390_interrupt_info,
+ list);
+ if (inti) {
+ VCPU_EVENT(vcpu, 4,
+ "interrupt: virtio parm:%x,parm64:%llx",
+ inti->ext.ext_params, inti->ext.ext_params2);
+ vcpu->stat.deliver_virtio_interrupt++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ inti->type,
+ inti->ext.ext_params,
+ inti->ext.ext_params2);
+ list_del(&inti->list);
+ fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
+ }
+ if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
+ clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
- rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params,
- (u32 *)__LC_EXT_PARAMS);
- rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
- (u64 *)__LC_EXT_PARAMS2);
+ if (inti) {
+ rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
+ (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
+ (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params,
+ (u32 *)__LC_EXT_PARAMS);
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
+ (u64 *)__LC_EXT_PARAMS2);
+ kfree(inti);
+ }
return rc ? -EFAULT : 0;
}
static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+ unsigned long irq_type)
{
- int rc;
+ struct list_head *isc_list;
+ struct kvm_s390_float_interrupt *fi;
+ struct kvm_s390_interrupt_info *inti = NULL;
+ int rc = 0;
- VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
- vcpu->stat.deliver_io_int++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- ((__u32)inti->io.subchannel_id << 16) |
- inti->io.subchannel_nr,
- ((__u64)inti->io.io_int_parm << 32) |
- inti->io.io_int_word);
-
- rc = put_guest_lc(vcpu, inti->io.subchannel_id,
- (u16 *)__LC_SUBCHANNEL_ID);
- rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
- (u16 *)__LC_SUBCHANNEL_NR);
- rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
- (u32 *)__LC_IO_INT_PARM);
- rc |= put_guest_lc(vcpu, inti->io.io_int_word,
- (u32 *)__LC_IO_INT_WORD);
- rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- return rc ? -EFAULT : 0;
-}
+ fi = &vcpu->kvm->arch.float_int;
-static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
-{
- struct kvm_s390_mchk_info *mchk = &inti->mchk;
- int rc;
+ spin_lock(&fi->lock);
+ isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0];
+ inti = list_first_entry_or_null(isc_list,
+ struct kvm_s390_interrupt_info,
+ list);
+ if (inti) {
+ VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
+ vcpu->stat.deliver_io_int++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ inti->type,
+ ((__u32)inti->io.subchannel_id << 16) |
+ inti->io.subchannel_nr,
+ ((__u64)inti->io.io_int_parm << 32) |
+ inti->io.io_int_word);
+ list_del(&inti->list);
+ fi->counters[FIRQ_CNTR_IO] -= 1;
+ }
+ if (list_empty(isc_list))
+ clear_bit(irq_type, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+
+ if (inti) {
+ rc = put_guest_lc(vcpu, inti->io.subchannel_id,
+ (u16 *)__LC_SUBCHANNEL_ID);
+ rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
+ (u16 *)__LC_SUBCHANNEL_NR);
+ rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
+ (u32 *)__LC_IO_INT_PARM);
+ rc |= put_guest_lc(vcpu, inti->io.io_int_word,
+ (u32 *)__LC_IO_INT_WORD);
+ rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ kfree(inti);
+ }
- VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
- mchk->mcic);
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
- mchk->cr14, mchk->mcic);
-
- rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
- rc |= put_guest_lc(vcpu, mchk->mcic,
- (u64 __user *) __LC_MCCK_CODE);
- rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
- (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
- rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
- &mchk->fixed_logout, sizeof(mchk->fixed_logout));
- rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
return rc ? -EFAULT : 0;
}
@@ -699,81 +779,49 @@ typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);
static const deliver_irq_t deliver_irq_funcs[] = {
[IRQ_PEND_MCHK_EX] = __deliver_machine_check,
+ [IRQ_PEND_MCHK_REP] = __deliver_machine_check,
[IRQ_PEND_PROG] = __deliver_prog,
[IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal,
[IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call,
[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
[IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer,
[IRQ_PEND_RESTART] = __deliver_restart,
- [IRQ_PEND_SIGP_STOP] = __deliver_stop,
[IRQ_PEND_SET_PREFIX] = __deliver_set_prefix,
[IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init,
+ [IRQ_PEND_EXT_SERVICE] = __deliver_service,
+ [IRQ_PEND_PFAULT_DONE] = __deliver_pfault_done,
+ [IRQ_PEND_VIRTIO] = __deliver_virtio,
};
-static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+/* Check whether an external call is pending (deliverable or not) */
+int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
- int rc;
-
- switch (inti->type) {
- case KVM_S390_INT_SERVICE:
- rc = __deliver_service(vcpu, inti);
- break;
- case KVM_S390_INT_PFAULT_DONE:
- rc = __deliver_pfault_done(vcpu, inti);
- break;
- case KVM_S390_INT_VIRTIO:
- rc = __deliver_virtio(vcpu, inti);
- break;
- case KVM_S390_MCHK:
- rc = __deliver_mchk_floating(vcpu, inti);
- break;
- case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- rc = __deliver_io(vcpu, inti);
- break;
- default:
- BUG();
- }
-
- return rc;
-}
-
-/* Check whether SIGP interpretation facility has an external call pending */
-int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
-{
- atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl;
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
- if (!psw_extint_disabled(vcpu) &&
- (vcpu->arch.sie_block->gcr[0] & 0x2000ul) &&
- (atomic_read(sigp_ctrl) & SIGP_CTRL_C) &&
- (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
- return 1;
+ if (!sclp_has_sigpif())
+ return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
- return 0;
+ return (sigp_ctrl & SIGP_CTRL_C) &&
+ (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND);
}
-int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
+int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
- struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
- struct kvm_s390_interrupt_info *inti;
int rc;
- rc = !!deliverable_local_irqs(vcpu);
-
- if ((!rc) && atomic_read(&fi->active)) {
- spin_lock(&fi->lock);
- list_for_each_entry(inti, &fi->list, list)
- if (__interrupt_is_deliverable(vcpu, inti)) {
- rc = 1;
- break;
- }
- spin_unlock(&fi->lock);
- }
+ rc = !!deliverable_irqs(vcpu);
if (!rc && kvm_cpu_has_pending_timer(vcpu))
rc = 1;
- if (!rc && kvm_s390_si_ext_call_pending(vcpu))
+ /* external call pending and deliverable */
+ if (!rc && kvm_s390_ext_call_pending(vcpu) &&
+ !psw_extint_disabled(vcpu) &&
+ (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
+ rc = 1;
+
+ if (!rc && !exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
rc = 1;
return rc;
@@ -781,12 +829,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- if (!(vcpu->arch.sie_block->ckc <
- get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
- return 0;
- if (!ckc_interrupts_enabled(vcpu))
- return 0;
- return 1;
+ return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
@@ -804,14 +847,20 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP; /* disabled wait */
}
- __set_cpu_idle(vcpu);
if (!ckc_interrupts_enabled(vcpu)) {
VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
+ __set_cpu_idle(vcpu);
goto no_timer;
}
now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
+
+ /* underflow */
+ if (vcpu->arch.sie_block->ckc < now)
+ return 0;
+
+ __set_cpu_idle(vcpu);
hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
@@ -820,7 +869,7 @@ no_timer:
__unset_cpu_idle(vcpu);
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
+ hrtimer_cancel(&vcpu->arch.ckc_timer);
return 0;
}
@@ -840,10 +889,20 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
struct kvm_vcpu *vcpu;
+ u64 now, sltime;
vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
- kvm_s390_vcpu_wakeup(vcpu);
+ now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
+ sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
+ /*
+ * If the monotonic clock runs faster than the tod clock we might be
+ * woken up too early and have to go back to sleep to avoid deadlocks.
+ */
+ if (vcpu->arch.sie_block->ckc > now &&
+ hrtimer_forward_now(timer, ns_to_ktime(sltime)))
+ return HRTIMER_RESTART;
+ kvm_s390_vcpu_wakeup(vcpu);
return HRTIMER_NORESTART;
}
@@ -859,67 +918,51 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
/* clear pending external calls set by sigp interpretation facility */
atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
- atomic_clear_mask(SIGP_CTRL_C,
- &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
+ vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
}
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
- struct kvm_s390_interrupt_info *n, *inti = NULL;
deliver_irq_t func;
- int deliver;
int rc = 0;
unsigned long irq_type;
- unsigned long deliverable_irqs;
+ unsigned long irqs;
__reset_intercept_indicators(vcpu);
/* pending ckc conditions might have been invalidated */
clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
- if (kvm_cpu_has_pending_timer(vcpu))
+ if (ckc_irq_pending(vcpu))
set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+ /* pending cpu timer conditions might have been invalidated */
+ clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
+ if (cpu_timer_irq_pending(vcpu))
+ set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
+
do {
- deliverable_irqs = deliverable_local_irqs(vcpu);
+ irqs = deliverable_irqs(vcpu);
/* bits are in the order of interrupt priority */
- irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT);
+ irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
if (irq_type == IRQ_PEND_COUNT)
break;
- func = deliver_irq_funcs[irq_type];
- if (!func) {
- WARN_ON_ONCE(func == NULL);
- clear_bit(irq_type, &li->pending_irqs);
- continue;
+ if (is_ioirq(irq_type)) {
+ rc = __deliver_io(vcpu, irq_type);
+ } else {
+ func = deliver_irq_funcs[irq_type];
+ if (!func) {
+ WARN_ON_ONCE(func == NULL);
+ clear_bit(irq_type, &li->pending_irqs);
+ continue;
+ }
+ rc = func(vcpu);
}
- rc = func(vcpu);
- } while (!rc && irq_type != IRQ_PEND_COUNT);
-
- set_intercept_indicators_local(vcpu);
+ if (rc)
+ break;
+ } while (!rc);
- if (!rc && atomic_read(&fi->active)) {
- do {
- deliver = 0;
- spin_lock(&fi->lock);
- list_for_each_entry_safe(inti, n, &fi->list, list) {
- if (__interrupt_is_deliverable(vcpu, inti)) {
- list_del(&inti->list);
- fi->irq_count--;
- deliver = 1;
- break;
- }
- __set_intercept_indicator(vcpu, inti);
- }
- if (list_empty(&fi->list))
- atomic_set(&fi->active, 0);
- spin_unlock(&fi->lock);
- if (deliver) {
- rc = __deliver_floating_interrupt(vcpu, inti);
- kfree(inti);
- }
- } while (!rc && deliver);
- }
+ set_intercept_indicators(vcpu);
return rc;
}
@@ -984,18 +1027,43 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
return 0;
}
-int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
+{
+ unsigned char new_val, old_val;
+ uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+
+ new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
+ old_val = *sigp_ctrl & ~SIGP_CTRL_C;
+ if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
+ /* another external call is pending */
+ return -EBUSY;
+ }
+ atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+ return 0;
+}
+
+static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
+ uint16_t src_id = irq->u.extcall.code;
VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
- irq->u.extcall.code);
+ src_id);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
- irq->u.extcall.code, 0, 2);
+ src_id, 0, 2);
+
+ /* sending vcpu invalid */
+ if (src_id >= KVM_MAX_VCPUS ||
+ kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
+ return -EINVAL;
+
+ if (sclp_has_sigpif())
+ return __inject_extcall_sigpif(vcpu, src_id);
+ if (!test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
+ return -EBUSY;
*extcall = irq->u.extcall;
- set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
return 0;
}
@@ -1006,23 +1074,41 @@ static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
- prefix->address);
+ irq->u.prefix.address);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
- prefix->address, 0, 2);
+ irq->u.prefix.address, 0, 2);
+
+ if (!is_vcpu_stopped(vcpu))
+ return -EBUSY;
*prefix = irq->u.prefix;
set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
return 0;
}
+#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_stop_info *stop = &li->irq.stop;
+ int rc = 0;
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);
- li->action_bits |= ACTION_STOP_ON_STOP;
- set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+ if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
+ return -EINVAL;
+
+ if (is_vcpu_stopped(vcpu)) {
+ if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
+ rc = kvm_s390_store_status_unloaded(vcpu,
+ KVM_S390_STORE_STATUS_NOADDR);
+ return rc;
+ }
+
+ if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
+ return -EBUSY;
+ stop->flags = irq->u.stop.flags;
+ __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
return 0;
}
@@ -1042,14 +1128,13 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
struct kvm_s390_irq *irq)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- struct kvm_s390_emerg_info *emerg = &li->irq.emerg;
VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
irq->u.emerg.code);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
- emerg->code, 0, 2);
+ irq->u.emerg.code, 0, 2);
- set_bit(emerg->code, li->sigp_emerg_pending);
+ set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
return 0;
@@ -1061,9 +1146,9 @@ static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
- mchk->mcic);
+ irq->u.mchk.mcic);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
- mchk->mcic, 2);
+ irq->u.mchk.mcic, 2);
/*
* Because repressible machine checks can be indicated along with
@@ -1112,81 +1197,182 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
return 0;
}
+static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
+ int isc, u32 schid)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+ struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
+ struct kvm_s390_interrupt_info *iter;
+ u16 id = (schid & 0xffff0000U) >> 16;
+ u16 nr = schid & 0x0000ffffU;
+ spin_lock(&fi->lock);
+ list_for_each_entry(iter, isc_list, list) {
+ if (schid && (id != iter->io.subchannel_id ||
+ nr != iter->io.subchannel_nr))
+ continue;
+ /* found an appropriate entry */
+ list_del_init(&iter->list);
+ fi->counters[FIRQ_CNTR_IO] -= 1;
+ if (list_empty(isc_list))
+ clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+ return iter;
+ }
+ spin_unlock(&fi->lock);
+ return NULL;
+}
+
+/*
+ * Dequeue and return an I/O interrupt matching any of the interruption
+ * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
+ */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
- u64 cr6, u64 schid)
+ u64 isc_mask, u32 schid)
+{
+ struct kvm_s390_interrupt_info *inti = NULL;
+ int isc;
+
+ for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
+ if (isc_mask & isc_to_isc_bits(isc))
+ inti = get_io_int(kvm, isc, schid);
+ }
+ return inti;
+}
+
+#define SCCB_MASK 0xFFFFFFF8
+#define SCCB_EVENT_PENDING 0x3
+
+static int __inject_service(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+
+ spin_lock(&fi->lock);
+ fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
+ /*
+ * Early versions of the QEMU s390 bios will inject several
+ * service interrupts one after another without handling a
+ * condition code indicating busy.
+ * We will silently ignore those superfluous sccb values.
+ * A future version of QEMU will take care of serializing
+ * SERVC requests.
+ */
+ if (fi->srv_signal.ext_params & SCCB_MASK)
+ goto out;
+ fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
+ set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
+out:
+ spin_unlock(&fi->lock);
+ kfree(inti);
+ return 0;
+}
+
+static int __inject_virtio(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+
+ spin_lock(&fi->lock);
+ if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
+ spin_unlock(&fi->lock);
+ return -EBUSY;
+ }
+ fi->counters[FIRQ_CNTR_VIRTIO] += 1;
+ list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
+ set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+ return 0;
+}
+
+static int __inject_pfault_done(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+
+ spin_lock(&fi->lock);
+ if (fi->counters[FIRQ_CNTR_PFAULT] >=
+ (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
+ spin_unlock(&fi->lock);
+ return -EBUSY;
+ }
+ fi->counters[FIRQ_CNTR_PFAULT] += 1;
+ list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
+ set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+ return 0;
+}
+
+#define CR_PENDING_SUBCLASS 28
+static int __inject_float_mchk(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+
+ spin_lock(&fi->lock);
+ fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
+ fi->mchk.mcic |= inti->mchk.mcic;
+ set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+ kfree(inti);
+ return 0;
+}
+
+static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
struct kvm_s390_float_interrupt *fi;
- struct kvm_s390_interrupt_info *inti, *iter;
+ struct list_head *list;
+ int isc;
- if ((!schid && !cr6) || (schid && cr6))
- return NULL;
- mutex_lock(&kvm->lock);
fi = &kvm->arch.float_int;
spin_lock(&fi->lock);
- inti = NULL;
- list_for_each_entry(iter, &fi->list, list) {
- if (!is_ioint(iter->type))
- continue;
- if (cr6 &&
- ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
- continue;
- if (schid) {
- if (((schid & 0x00000000ffff0000) >> 16) !=
- iter->io.subchannel_id)
- continue;
- if ((schid & 0x000000000000ffff) !=
- iter->io.subchannel_nr)
- continue;
- }
- inti = iter;
- break;
- }
- if (inti) {
- list_del_init(&inti->list);
- fi->irq_count--;
+ if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
+ spin_unlock(&fi->lock);
+ return -EBUSY;
}
- if (list_empty(&fi->list))
- atomic_set(&fi->active, 0);
+ fi->counters[FIRQ_CNTR_IO] += 1;
+
+ isc = int_word_to_isc(inti->io.io_int_word);
+ list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
+ list_add_tail(&inti->list, list);
+ set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
spin_unlock(&fi->lock);
- mutex_unlock(&kvm->lock);
- return inti;
+ return 0;
}
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
struct kvm_s390_local_interrupt *li;
struct kvm_s390_float_interrupt *fi;
- struct kvm_s390_interrupt_info *iter;
struct kvm_vcpu *dst_vcpu = NULL;
int sigcpu;
- int rc = 0;
+ u64 type = READ_ONCE(inti->type);
+ int rc;
- mutex_lock(&kvm->lock);
fi = &kvm->arch.float_int;
- spin_lock(&fi->lock);
- if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
+
+ switch (type) {
+ case KVM_S390_MCHK:
+ rc = __inject_float_mchk(kvm, inti);
+ break;
+ case KVM_S390_INT_VIRTIO:
+ rc = __inject_virtio(kvm, inti);
+ break;
+ case KVM_S390_INT_SERVICE:
+ rc = __inject_service(kvm, inti);
+ break;
+ case KVM_S390_INT_PFAULT_DONE:
+ rc = __inject_pfault_done(kvm, inti);
+ break;
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+ rc = __inject_io(kvm, inti);
+ break;
+ default:
rc = -EINVAL;
- goto unlock_fi;
}
- fi->irq_count++;
- if (!is_ioint(inti->type)) {
- list_add_tail(&inti->list, &fi->list);
- } else {
- u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);
+ if (rc)
+ return rc;
- /* Keep I/O interrupts sorted in isc order. */
- list_for_each_entry(iter, &fi->list, list) {
- if (!is_ioint(iter->type))
- continue;
- if (int_word_to_isc_bits(iter->io.io_int_word)
- <= isc_bits)
- continue;
- break;
- }
- list_add_tail(&inti->list, &iter->list);
- }
- atomic_set(&fi->active, 1);
sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
if (sigcpu == KVM_MAX_VCPUS) {
do {
@@ -1198,7 +1384,7 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
li = &dst_vcpu->arch.local_int;
spin_lock(&li->lock);
- switch (inti->type) {
+ switch (type) {
case KVM_S390_MCHK:
atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
break;
@@ -1211,16 +1397,15 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
}
spin_unlock(&li->lock);
kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
-unlock_fi:
- spin_unlock(&fi->lock);
- mutex_unlock(&kvm->lock);
- return rc;
+ return 0;
+
}
int kvm_s390_inject_vm(struct kvm *kvm,
struct kvm_s390_interrupt *s390int)
{
struct kvm_s390_interrupt_info *inti;
+ int rc;
inti = kzalloc(sizeof(*inti), GFP_KERNEL);
if (!inti)
@@ -1239,7 +1424,6 @@ int kvm_s390_inject_vm(struct kvm *kvm,
inti->ext.ext_params = s390int->parm;
break;
case KVM_S390_INT_PFAULT_DONE:
- inti->type = s390int->type;
inti->ext.ext_params2 = s390int->parm64;
break;
case KVM_S390_MCHK:
@@ -1268,13 +1452,16 @@ int kvm_s390_inject_vm(struct kvm *kvm,
trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
2);
- return __inject_vm(kvm, inti);
+ rc = __inject_vm(kvm, inti);
+ if (rc)
+ kfree(inti);
+ return rc;
}
-void kvm_s390_reinject_io_int(struct kvm *kvm,
+int kvm_s390_reinject_io_int(struct kvm *kvm,
struct kvm_s390_interrupt_info *inti)
{
- __inject_vm(kvm, inti);
+ return __inject_vm(kvm, inti);
}
int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
@@ -1290,13 +1477,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
case KVM_S390_SIGP_SET_PREFIX:
irq->u.prefix.address = s390int->parm;
break;
+ case KVM_S390_SIGP_STOP:
+ irq->u.stop.flags = s390int->parm;
+ break;
case KVM_S390_INT_EXTERNAL_CALL:
- if (irq->u.extcall.code & 0xffff0000)
+ if (s390int->parm & 0xffff0000)
return -EINVAL;
irq->u.extcall.code = s390int->parm;
break;
case KVM_S390_INT_EMERGENCY:
- if (irq->u.emerg.code & 0xffff0000)
+ if (s390int->parm & 0xffff0000)
return -EINVAL;
irq->u.emerg.code = s390int->parm;
break;
@@ -1307,12 +1497,27 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
return 0;
}
-int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+ return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+}
+
+void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- int rc;
spin_lock(&li->lock);
+ li->irq.stop.flags = 0;
+ clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+ spin_unlock(&li->lock);
+}
+
+static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+ int rc;
+
switch (irq->type) {
case KVM_S390_PROGRAM_INT:
VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
@@ -1352,87 +1557,130 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
default:
rc = -EINVAL;
}
+
+ return rc;
+}
+
+int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ int rc;
+
+ spin_lock(&li->lock);
+ rc = do_inject_vcpu(vcpu, irq);
spin_unlock(&li->lock);
if (!rc)
kvm_s390_vcpu_wakeup(vcpu);
return rc;
}
-void kvm_s390_clear_float_irqs(struct kvm *kvm)
+static inline void clear_irq_list(struct list_head *_list)
{
- struct kvm_s390_float_interrupt *fi;
- struct kvm_s390_interrupt_info *n, *inti = NULL;
+ struct kvm_s390_interrupt_info *inti, *n;
- mutex_lock(&kvm->lock);
- fi = &kvm->arch.float_int;
- spin_lock(&fi->lock);
- list_for_each_entry_safe(inti, n, &fi->list, list) {
+ list_for_each_entry_safe(inti, n, _list, list) {
list_del(&inti->list);
kfree(inti);
}
- fi->irq_count = 0;
- atomic_set(&fi->active, 0);
- spin_unlock(&fi->lock);
- mutex_unlock(&kvm->lock);
}
-static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
- u8 *addr)
+static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
+ struct kvm_s390_irq *irq)
{
- struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
- struct kvm_s390_irq irq = {0};
-
- irq.type = inti->type;
+ irq->type = inti->type;
switch (inti->type) {
case KVM_S390_INT_PFAULT_INIT:
case KVM_S390_INT_PFAULT_DONE:
case KVM_S390_INT_VIRTIO:
- case KVM_S390_INT_SERVICE:
- irq.u.ext = inti->ext;
+ irq->u.ext = inti->ext;
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- irq.u.io = inti->io;
+ irq->u.io = inti->io;
break;
- case KVM_S390_MCHK:
- irq.u.mchk = inti->mchk;
- break;
- default:
- return -EINVAL;
}
+}
- if (copy_to_user(uptr, &irq, sizeof(irq)))
- return -EFAULT;
+void kvm_s390_clear_float_irqs(struct kvm *kvm)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+ int i;
- return 0;
-}
+ spin_lock(&fi->lock);
+ for (i = 0; i < FIRQ_LIST_COUNT; i++)
+ clear_irq_list(&fi->lists[i]);
+ for (i = 0; i < FIRQ_MAX_COUNT; i++)
+ fi->counters[i] = 0;
+ spin_unlock(&fi->lock);
+}
-static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
+static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
struct kvm_s390_interrupt_info *inti;
struct kvm_s390_float_interrupt *fi;
+ struct kvm_s390_irq *buf;
+ struct kvm_s390_irq *irq;
+ int max_irqs;
int ret = 0;
int n = 0;
+ int i;
+
+ if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
+ return -EINVAL;
+
+ /*
+ * We are already using -ENOMEM to signal
+ * userspace that it may retry with a bigger buffer,
+ * so we need to use something else for this case.
+ */
+ buf = vzalloc(len);
+ if (!buf)
+ return -ENOBUFS;
+
+ max_irqs = len / sizeof(struct kvm_s390_irq);
- mutex_lock(&kvm->lock);
fi = &kvm->arch.float_int;
spin_lock(&fi->lock);
-
- list_for_each_entry(inti, &fi->list, list) {
- if (len < sizeof(struct kvm_s390_irq)) {
+ for (i = 0; i < FIRQ_LIST_COUNT; i++) {
+ list_for_each_entry(inti, &fi->lists[i], list) {
+ if (n == max_irqs) {
+ /* signal userspace to try again */
+ ret = -ENOMEM;
+ goto out;
+ }
+ inti_to_irq(inti, &buf[n]);
+ n++;
+ }
+ }
+ if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
+ if (n == max_irqs) {
/* signal userspace to try again */
ret = -ENOMEM;
- break;
+ goto out;
}
- ret = copy_irq_to_user(inti, buf);
- if (ret)
- break;
- buf += sizeof(struct kvm_s390_irq);
- len -= sizeof(struct kvm_s390_irq);
+ irq = (struct kvm_s390_irq *) &buf[n];
+ irq->type = KVM_S390_INT_SERVICE;
+ irq->u.ext = fi->srv_signal;
n++;
}
+ if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
+ if (n == max_irqs) {
+ /* signal userspace to try again */
+ ret = -ENOMEM;
+ goto out;
+ }
+ irq = (struct kvm_s390_irq *) &buf[n];
+ irq->type = KVM_S390_MCHK;
+ irq->u.mchk = fi->mchk;
+ n++;
+ }
+out:
spin_unlock(&fi->lock);
- mutex_unlock(&kvm->lock);
+ if (!ret && n > 0) {
+ if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
+ ret = -EFAULT;
+ }
+ vfree(buf);
return ret < 0 ? ret : n;
}
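Seen from user space, this function backs the FLIC device's KVM_DEV_FLIC_GET_ALL_IRQS group: -ENOMEM means the supplied buffer was too small and the call may be retried with a larger one, a kernel-internal allocation failure is reported as -ENOBUFS, and, under the behaviour visible in this patch, a non-negative return is the number of interrupts copied. A rough sketch of the retry loop, assuming flic_fd is an already created FLIC device file descriptor and with error handling trimmed:

#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int dump_floating_irqs(int flic_fd)
{
	__u64 len = 64 * sizeof(struct kvm_s390_irq);
	void *buf = NULL;
	int n;

	for (;;) {
		buf = malloc(len);
		if (!buf)
			return -1;

		struct kvm_device_attr attr = {
			.group = KVM_DEV_FLIC_GET_ALL_IRQS,
			.attr  = len,				/* buffer size in bytes */
			.addr  = (__u64)(unsigned long)buf,
		};

		n = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);
		if (n >= 0 || errno != ENOMEM)
			break;		/* success, or an error other than "too small" */

		free(buf);
		len *= 2;		/* too small: retry with a bigger buffer */
	}

	/* ... on success, buf now holds n struct kvm_s390_irq entries ... */
	free(buf);
	return n;
}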
@@ -1443,7 +1691,7 @@ static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
switch (attr->group) {
case KVM_DEV_FLIC_GET_ALL_IRQS:
- r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
+ r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
attr->attr);
break;
default:
@@ -1875,3 +2123,143 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
{
return -EINVAL;
}
+
+int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_irq *buf;
+ int r = 0;
+ int n;
+
+ buf = vmalloc(len);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user((void *) buf, irqstate, len)) {
+ r = -EFAULT;
+ goto out_free;
+ }
+
+ /*
+ * Don't allow setting the interrupt state
+ * when there are already interrupts pending
+ */
+ spin_lock(&li->lock);
+ if (li->pending_irqs) {
+ r = -EBUSY;
+ goto out_unlock;
+ }
+
+ for (n = 0; n < len / sizeof(*buf); n++) {
+ r = do_inject_vcpu(vcpu, &buf[n]);
+ if (r)
+ break;
+ }
+
+out_unlock:
+ spin_unlock(&li->lock);
+out_free:
+ vfree(buf);
+
+ return r;
+}
+
+static void store_local_irq(struct kvm_s390_local_interrupt *li,
+ struct kvm_s390_irq *irq,
+ unsigned long irq_type)
+{
+ switch (irq_type) {
+ case IRQ_PEND_MCHK_EX:
+ case IRQ_PEND_MCHK_REP:
+ irq->type = KVM_S390_MCHK;
+ irq->u.mchk = li->irq.mchk;
+ break;
+ case IRQ_PEND_PROG:
+ irq->type = KVM_S390_PROGRAM_INT;
+ irq->u.pgm = li->irq.pgm;
+ break;
+ case IRQ_PEND_PFAULT_INIT:
+ irq->type = KVM_S390_INT_PFAULT_INIT;
+ irq->u.ext = li->irq.ext;
+ break;
+ case IRQ_PEND_EXT_EXTERNAL:
+ irq->type = KVM_S390_INT_EXTERNAL_CALL;
+ irq->u.extcall = li->irq.extcall;
+ break;
+ case IRQ_PEND_EXT_CLOCK_COMP:
+ irq->type = KVM_S390_INT_CLOCK_COMP;
+ break;
+ case IRQ_PEND_EXT_CPU_TIMER:
+ irq->type = KVM_S390_INT_CPU_TIMER;
+ break;
+ case IRQ_PEND_SIGP_STOP:
+ irq->type = KVM_S390_SIGP_STOP;
+ irq->u.stop = li->irq.stop;
+ break;
+ case IRQ_PEND_RESTART:
+ irq->type = KVM_S390_RESTART;
+ break;
+ case IRQ_PEND_SET_PREFIX:
+ irq->type = KVM_S390_SIGP_SET_PREFIX;
+ irq->u.prefix = li->irq.prefix;
+ break;
+ }
+}
+
+int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
+{
+ uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+ unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ unsigned long pending_irqs;
+ struct kvm_s390_irq irq;
+ unsigned long irq_type;
+ int cpuaddr;
+ int n = 0;
+
+ spin_lock(&li->lock);
+ pending_irqs = li->pending_irqs;
+ memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
+ sizeof(sigp_emerg_pending));
+ spin_unlock(&li->lock);
+
+ for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
+ memset(&irq, 0, sizeof(irq));
+ if (irq_type == IRQ_PEND_EXT_EMERGENCY)
+ continue;
+ if (n + sizeof(irq) > len)
+ return -ENOBUFS;
+ store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
+ if (copy_to_user(&buf[n], &irq, sizeof(irq)))
+ return -EFAULT;
+ n += sizeof(irq);
+ }
+
+ if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
+ for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
+ memset(&irq, 0, sizeof(irq));
+ if (n + sizeof(irq) > len)
+ return -ENOBUFS;
+ irq.type = KVM_S390_INT_EMERGENCY;
+ irq.u.emerg.code = cpuaddr;
+ if (copy_to_user(&buf[n], &irq, sizeof(irq)))
+ return -EFAULT;
+ n += sizeof(irq);
+ }
+ }
+
+ if ((sigp_ctrl & SIGP_CTRL_C) &&
+ (atomic_read(&vcpu->arch.sie_block->cpuflags) &
+ CPUSTAT_ECALL_PEND)) {
+ if (n + sizeof(irq) > len)
+ return -ENOBUFS;
+ memset(&irq, 0, sizeof(irq));
+ irq.type = KVM_S390_INT_EXTERNAL_CALL;
+ irq.u.extcall.code = sigp_ctrl & SIGP_CTRL_SCN_MASK;
+ if (copy_to_user(&buf[n], &irq, sizeof(irq)))
+ return -EFAULT;
+ n += sizeof(irq);
+ }
+
+ return n;
+}
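kvm_s390_get_irq_state() and kvm_s390_set_irq_state() above back the new KVM_S390_GET_IRQ_STATE / KVM_S390_SET_IRQ_STATE vcpu ioctls wired up later in this patch, serializing the local interrupt state into an array of struct kvm_s390_irq, e.g. for migration. A hedged user-space sketch of the save side, using only the kvm_s390_irq_state fields the patch touches (buf, len):

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Save the pending local interrupts of one vcpu.
 * Returns the number of bytes the kernel wrote into irqs[], or -errno. */
static int save_vcpu_irqs(int vcpu_fd, struct kvm_s390_irq *irqs, unsigned int max)
{
	struct kvm_s390_irq_state state = {
		.buf = (__u64)(unsigned long)irqs,
		.len = max * sizeof(*irqs),
	};
	int n = ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &state);

	return n < 0 ? -errno : n;
}

Restoring works symmetrically through KVM_S390_SET_IRQ_STATE, which the kernel refuses with -EBUSY while any interrupt is already pending on the target vcpu.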
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3e09801e3104..afa2bd750ffc 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -22,14 +22,16 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
+#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
+#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
-#include <asm/facility.h>
+#include <asm/isc.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -38,6 +40,11 @@
#include "trace.h"
#include "trace-s390.h"
+#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
+#define LOCAL_IRQS 32
+#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
+ (KVM_MAX_VCPUS + LOCAL_IRQS))
+
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
struct kvm_stats_debugfs_item debugfs_entries[] = {
@@ -50,6 +57,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "exit_instruction", VCPU_STAT(exit_instruction) },
{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
+ { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
@@ -86,6 +94,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
+ { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
@@ -98,15 +107,20 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
-unsigned long *vfacilities;
-static struct gmap_notifier gmap_notifier;
+/* upper facilities limit for kvm */
+unsigned long kvm_s390_fac_list_mask[] = {
+ 0xffe6fffbfcfdfc40UL,
+ 0x205c800000000000UL,
+};
-/* test availability of vfacility */
-int test_vfacility(unsigned long nr)
+unsigned long kvm_s390_fac_list_mask_size(void)
{
- return __test_facility(nr, (void *) vfacilities);
+ BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
+ return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
+static struct gmap_notifier gmap_notifier;
+
/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
@@ -159,15 +173,22 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ONE_REG:
case KVM_CAP_ENABLE_CAP:
case KVM_CAP_S390_CSS_SUPPORT:
- case KVM_CAP_IRQFD:
case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_ENABLE_CAP_VM:
case KVM_CAP_S390_IRQCHIP:
case KVM_CAP_VM_ATTRIBUTES:
case KVM_CAP_MP_STATE:
+ case KVM_CAP_S390_INJECT_IRQ:
+ case KVM_CAP_S390_USER_SIGP:
+ case KVM_CAP_S390_USER_STSI:
+ case KVM_CAP_S390_SKEYS:
+ case KVM_CAP_S390_IRQ_STATE:
r = 1;
break;
+ case KVM_CAP_S390_MEM_OP:
+ r = MEM_OP_MAX_SIZE;
+ break;
case KVM_CAP_NR_VCPUS:
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
@@ -178,6 +199,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_COW:
r = MACHINE_HAS_ESOP;
break;
+ case KVM_CAP_S390_VECTOR_REGISTERS:
+ r = MACHINE_HAS_VX;
+ break;
default:
r = 0;
}
@@ -254,6 +278,22 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
kvm->arch.use_irqchip = 1;
r = 0;
break;
+ case KVM_CAP_S390_USER_SIGP:
+ kvm->arch.user_sigp = 1;
+ r = 0;
+ break;
+ case KVM_CAP_S390_VECTOR_REGISTERS:
+ if (MACHINE_HAS_VX) {
+ set_kvm_facility(kvm->arch.model.fac->mask, 129);
+ set_kvm_facility(kvm->arch.model.fac->list, 129);
+ r = 0;
+ } else
+ r = -EINVAL;
+ break;
+ case KVM_CAP_S390_USER_STSI:
+ kvm->arch.user_stsi = 1;
+ r = 0;
+ break;
default:
r = -EINVAL;
break;
@@ -261,7 +301,24 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
return r;
}
-static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
+static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ int ret;
+
+ switch (attr->attr) {
+ case KVM_S390_VM_MEM_LIMIT_SIZE:
+ ret = 0;
+ if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
+ ret = -EFAULT;
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+ return ret;
+}
+
+static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
int ret;
unsigned int idx;
@@ -283,6 +340,190 @@ static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
mutex_unlock(&kvm->lock);
ret = 0;
break;
+ case KVM_S390_VM_MEM_LIMIT_SIZE: {
+ unsigned long new_limit;
+
+ if (kvm_is_ucontrol(kvm))
+ return -EINVAL;
+
+ if (get_user(new_limit, (u64 __user *)attr->addr))
+ return -EFAULT;
+
+ if (new_limit > kvm->arch.gmap->asce_end)
+ return -E2BIG;
+
+ ret = -EBUSY;
+ mutex_lock(&kvm->lock);
+ if (atomic_read(&kvm->online_vcpus) == 0) {
+ /* gmap_alloc will round the limit up */
+ struct gmap *new = gmap_alloc(current->mm, new_limit);
+
+ if (!new) {
+ ret = -ENOMEM;
+ } else {
+ gmap_free(kvm->arch.gmap);
+ new->private = kvm;
+ kvm->arch.gmap = new;
+ ret = 0;
+ }
+ }
+ mutex_unlock(&kvm->lock);
+ break;
+ }
+ default:
+ ret = -ENXIO;
+ break;
+ }
+ return ret;
+}
+
+static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
+
+static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ if (!test_kvm_facility(kvm, 76))
+ return -EINVAL;
+
+ mutex_lock(&kvm->lock);
+ switch (attr->attr) {
+ case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
+ get_random_bytes(
+ kvm->arch.crypto.crycb->aes_wrapping_key_mask,
+ sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+ kvm->arch.crypto.aes_kw = 1;
+ break;
+ case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
+ get_random_bytes(
+ kvm->arch.crypto.crycb->dea_wrapping_key_mask,
+ sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
+ kvm->arch.crypto.dea_kw = 1;
+ break;
+ case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
+ kvm->arch.crypto.aes_kw = 0;
+ memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
+ sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+ break;
+ case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
+ kvm->arch.crypto.dea_kw = 0;
+ memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
+ sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
+ break;
+ default:
+ mutex_unlock(&kvm->lock);
+ return -ENXIO;
+ }
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ kvm_s390_vcpu_crypto_setup(vcpu);
+ exit_sie(vcpu);
+ }
+ mutex_unlock(&kvm->lock);
+ return 0;
+}
+
+static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ u8 gtod_high;
+
+ if (copy_from_user(&gtod_high, (void __user *)attr->addr,
+ sizeof(gtod_high)))
+ return -EFAULT;
+
+ if (gtod_high != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ struct kvm_vcpu *cur_vcpu;
+ unsigned int vcpu_idx;
+ u64 host_tod, gtod;
+ int r;
+
+ if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+ return -EFAULT;
+
+ r = store_tod_clock(&host_tod);
+ if (r)
+ return r;
+
+ mutex_lock(&kvm->lock);
+ kvm->arch.epoch = gtod - host_tod;
+ kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
+ cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
+ exit_sie(cur_vcpu);
+ }
+ mutex_unlock(&kvm->lock);
+ return 0;
+}
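Only the delta between guest and host TOD is stored: if the host TOD currently reads 0x1000 and user space sets a guest TOD of 0x0c00, kvm->arch.epoch becomes -0x400 and every vcpu's sie_block->epoch is updated to that value; kvm_s390_get_tod_low() below recovers the guest value again as host_tod + epoch.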
+
+static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ int ret;
+
+ if (attr->flags)
+ return -EINVAL;
+
+ switch (attr->attr) {
+ case KVM_S390_VM_TOD_HIGH:
+ ret = kvm_s390_set_tod_high(kvm, attr);
+ break;
+ case KVM_S390_VM_TOD_LOW:
+ ret = kvm_s390_set_tod_low(kvm, attr);
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+ return ret;
+}
+
+static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ u8 gtod_high = 0;
+
+ if (copy_to_user((void __user *)attr->addr, &gtod_high,
+ sizeof(gtod_high)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ u64 host_tod, gtod;
+ int r;
+
+ r = store_tod_clock(&host_tod);
+ if (r)
+ return r;
+
+ gtod = host_tod + kvm->arch.epoch;
+ if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ int ret;
+
+ if (attr->flags)
+ return -EINVAL;
+
+ switch (attr->attr) {
+ case KVM_S390_VM_TOD_HIGH:
+ ret = kvm_s390_get_tod_high(kvm, attr);
+ break;
+ case KVM_S390_VM_TOD_LOW:
+ ret = kvm_s390_get_tod_low(kvm, attr);
+ break;
default:
ret = -ENXIO;
break;
@@ -290,13 +531,122 @@ static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
return ret;
}
+static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ struct kvm_s390_vm_cpu_processor *proc;
+ int ret = 0;
+
+ mutex_lock(&kvm->lock);
+ if (atomic_read(&kvm->online_vcpus)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+ if (!proc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (!copy_from_user(proc, (void __user *)attr->addr,
+ sizeof(*proc))) {
+ memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
+ sizeof(struct cpuid));
+ kvm->arch.model.ibc = proc->ibc;
+ memcpy(kvm->arch.model.fac->list, proc->fac_list,
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
+ } else
+ ret = -EFAULT;
+ kfree(proc);
+out:
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
+
+static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ int ret = -ENXIO;
+
+ switch (attr->attr) {
+ case KVM_S390_VM_CPU_PROCESSOR:
+ ret = kvm_s390_set_processor(kvm, attr);
+ break;
+ }
+ return ret;
+}
+
+static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ struct kvm_s390_vm_cpu_processor *proc;
+ int ret = 0;
+
+ proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+ if (!proc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
+ proc->ibc = kvm->arch.model.ibc;
+ memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
+ if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
+ ret = -EFAULT;
+ kfree(proc);
+out:
+ return ret;
+}
+
+static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ struct kvm_s390_vm_cpu_machine *mach;
+ int ret = 0;
+
+ mach = kzalloc(sizeof(*mach), GFP_KERNEL);
+ if (!mach) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ get_cpu_id((struct cpuid *) &mach->cpuid);
+ mach->ibc = sclp_get_ibc();
+ memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
+ memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
+ if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
+ ret = -EFAULT;
+ kfree(mach);
+out:
+ return ret;
+}
+
+static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ int ret = -ENXIO;
+
+ switch (attr->attr) {
+ case KVM_S390_VM_CPU_PROCESSOR:
+ ret = kvm_s390_get_processor(kvm, attr);
+ break;
+ case KVM_S390_VM_CPU_MACHINE:
+ ret = kvm_s390_get_machine(kvm, attr);
+ break;
+ }
+ return ret;
+}
+
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
int ret;
switch (attr->group) {
case KVM_S390_VM_MEM_CTRL:
- ret = kvm_s390_mem_control(kvm, attr);
+ ret = kvm_s390_set_mem_control(kvm, attr);
+ break;
+ case KVM_S390_VM_TOD:
+ ret = kvm_s390_set_tod(kvm, attr);
+ break;
+ case KVM_S390_VM_CPU_MODEL:
+ ret = kvm_s390_set_cpu_model(kvm, attr);
+ break;
+ case KVM_S390_VM_CRYPTO:
+ ret = kvm_s390_vm_set_crypto(kvm, attr);
break;
default:
ret = -ENXIO;
@@ -308,7 +658,24 @@ static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
- return -ENXIO;
+ int ret;
+
+ switch (attr->group) {
+ case KVM_S390_VM_MEM_CTRL:
+ ret = kvm_s390_get_mem_control(kvm, attr);
+ break;
+ case KVM_S390_VM_TOD:
+ ret = kvm_s390_get_tod(kvm, attr);
+ break;
+ case KVM_S390_VM_CPU_MODEL:
+ ret = kvm_s390_get_cpu_model(kvm, attr);
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
}
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
@@ -320,6 +687,42 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
switch (attr->attr) {
case KVM_S390_VM_MEM_ENABLE_CMMA:
case KVM_S390_VM_MEM_CLR_CMMA:
+ case KVM_S390_VM_MEM_LIMIT_SIZE:
+ ret = 0;
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+ break;
+ case KVM_S390_VM_TOD:
+ switch (attr->attr) {
+ case KVM_S390_VM_TOD_LOW:
+ case KVM_S390_VM_TOD_HIGH:
+ ret = 0;
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+ break;
+ case KVM_S390_VM_CPU_MODEL:
+ switch (attr->attr) {
+ case KVM_S390_VM_CPU_PROCESSOR:
+ case KVM_S390_VM_CPU_MACHINE:
+ ret = 0;
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+ break;
+ case KVM_S390_VM_CRYPTO:
+ switch (attr->attr) {
+ case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
+ case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
+ case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
+ case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
ret = 0;
break;
default:
@@ -335,6 +738,108 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
return ret;
}
+static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
+{
+ uint8_t *keys;
+ uint64_t hva;
+ unsigned long curkey;
+ int i, r = 0;
+
+ if (args->flags != 0)
+ return -EINVAL;
+
+ /* Is this guest using storage keys? */
+ if (!mm_use_skey(current->mm))
+ return KVM_S390_GET_SKEYS_NONE;
+
+ /* Enforce sane limit on memory allocation */
+ if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
+ return -EINVAL;
+
+ keys = kmalloc_array(args->count, sizeof(uint8_t),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!keys)
+ keys = vmalloc(sizeof(uint8_t) * args->count);
+ if (!keys)
+ return -ENOMEM;
+
+ for (i = 0; i < args->count; i++) {
+ hva = gfn_to_hva(kvm, args->start_gfn + i);
+ if (kvm_is_error_hva(hva)) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ curkey = get_guest_storage_key(current->mm, hva);
+ if (IS_ERR_VALUE(curkey)) {
+ r = curkey;
+ goto out;
+ }
+ keys[i] = curkey;
+ }
+
+ r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
+ sizeof(uint8_t) * args->count);
+ if (r)
+ r = -EFAULT;
+out:
+ kvfree(keys);
+ return r;
+}
+
+static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
+{
+ uint8_t *keys;
+ uint64_t hva;
+ int i, r = 0;
+
+ if (args->flags != 0)
+ return -EINVAL;
+
+ /* Enforce sane limit on memory allocation */
+ if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
+ return -EINVAL;
+
+ keys = kmalloc_array(args->count, sizeof(uint8_t),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!keys)
+ keys = vmalloc(sizeof(uint8_t) * args->count);
+ if (!keys)
+ return -ENOMEM;
+
+ r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
+ sizeof(uint8_t) * args->count);
+ if (r) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ /* Enable storage key handling for the guest */
+ s390_enable_skey();
+
+ for (i = 0; i < args->count; i++) {
+ hva = gfn_to_hva(kvm, args->start_gfn + i);
+ if (kvm_is_error_hva(hva)) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ /* Lowest order bit is reserved */
+ if (keys[i] & 0x01) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ r = set_guest_storage_key(current->mm, hva,
+ (unsigned long)keys[i], 0);
+ if (r)
+ goto out;
+ }
+out:
+ kvfree(keys);
+ return r;
+}
+
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -394,6 +899,26 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_s390_vm_has_attr(kvm, &attr);
break;
}
+ case KVM_S390_GET_SKEYS: {
+ struct kvm_s390_skeys args;
+
+ r = -EFAULT;
+ if (copy_from_user(&args, argp,
+ sizeof(struct kvm_s390_skeys)))
+ break;
+ r = kvm_s390_get_skeys(kvm, &args);
+ break;
+ }
+ case KVM_S390_SET_SKEYS: {
+ struct kvm_s390_skeys args;
+
+ r = -EFAULT;
+ if (copy_from_user(&args, argp,
+ sizeof(struct kvm_s390_skeys)))
+ break;
+ r = kvm_s390_set_skeys(kvm, &args);
+ break;
+ }
default:
r = -ENOTTY;
}
@@ -401,9 +926,64 @@ long kvm_arch_vm_ioctl(struct file *filp,
return r;
}
+static int kvm_s390_query_ap_config(u8 *config)
+{
+ u32 fcn_code = 0x04000000UL;
+ u32 cc = 0;
+
+ memset(config, 0, 128);
+ asm volatile(
+ "lgr 0,%1\n"
+ "lgr 2,%2\n"
+ ".long 0xb2af0000\n" /* PQAP(QCI) */
+ "0: ipm %0\n"
+ "srl %0,28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+r" (cc)
+ : "r" (fcn_code), "r" (config)
+ : "cc", "0", "2", "memory"
+ );
+
+ return cc;
+}
+
+static int kvm_s390_apxa_installed(void)
+{
+ u8 config[128];
+ int cc;
+
+ if (test_facility(2) && test_facility(12)) {
+ cc = kvm_s390_query_ap_config(config);
+
+ if (cc)
+ pr_err("PQAP(QCI) failed with cc=%d", cc);
+ else
+ return config[0] & 0x40;
+ }
+
+ return 0;
+}
+
+static void kvm_s390_set_crycb_format(struct kvm *kvm)
+{
+ kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
+
+ if (kvm_s390_apxa_installed())
+ kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
+ else
+ kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
+}
+
+static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
+{
+ get_cpu_id(cpu_id);
+ cpu_id->version = 0xff;
+}
+
static int kvm_s390_crypto_init(struct kvm *kvm)
{
- if (!test_vfacility(76))
+ if (!test_kvm_facility(kvm, 76))
return 0;
kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
@@ -411,15 +991,22 @@ static int kvm_s390_crypto_init(struct kvm *kvm)
if (!kvm->arch.crypto.crycb)
return -ENOMEM;
- kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
- CRYCB_FORMAT1;
+ kvm_s390_set_crycb_format(kvm);
+
+ /* Enable AES/DEA protected key functions by default */
+ kvm->arch.crypto.aes_kw = 1;
+ kvm->arch.crypto.dea_kw = 1;
+ get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
+ sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+ get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
+ sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
return 0;
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
- int rc;
+ int i, rc;
char debug_name[16];
static unsigned long sca_offset;
@@ -452,13 +1039,43 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
if (!kvm->arch.dbf)
- goto out_nodbf;
+ goto out_err;
+
+ /*
+ * The architectural maximum number of facility bits is 16 kbit, which
+ * takes 2 kbyte to store. Thus we need a full page to hold the guest
+ * facility list (arch.model.fac->list) and the facility mask
+ * (arch.model.fac->mask). The page address has to fit into 31 bits and
+ * be word aligned.
+ */
+ kvm->arch.model.fac =
+ (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!kvm->arch.model.fac)
+ goto out_err;
+
+ /* Populate the facility mask initially. */
+ memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
+ for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
+ if (i < kvm_s390_fac_list_mask_size())
+ kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
+ else
+ kvm->arch.model.fac->mask[i] = 0UL;
+ }
+
+ /* Populate the facility list initially. */
+ memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
+
+ kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
+ kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
if (kvm_s390_crypto_init(kvm) < 0)
- goto out_crypto;
+ goto out_err;
spin_lock_init(&kvm->arch.float_int.lock);
- INIT_LIST_HEAD(&kvm->arch.float_int.list);
+ for (i = 0; i < FIRQ_LIST_COUNT; i++)
+ INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
init_waitqueue_head(&kvm->arch.ipte_wq);
mutex_init(&kvm->arch.ipte_mutex);
@@ -470,24 +1087,23 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
} else {
kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
if (!kvm->arch.gmap)
- goto out_nogmap;
+ goto out_err;
kvm->arch.gmap->private = kvm;
kvm->arch.gmap->pfault_enabled = 0;
}
kvm->arch.css_support = 0;
kvm->arch.use_irqchip = 0;
+ kvm->arch.epoch = 0;
spin_lock_init(&kvm->arch.start_stop_lock);
return 0;
-out_nogmap:
+out_err:
kfree(kvm->arch.crypto.crycb);
-out_crypto:
+ free_page((unsigned long)kvm->arch.model.fac);
debug_unregister(kvm->arch.dbf);
-out_nodbf:
free_page((unsigned long)(kvm->arch.sca));
-out_err:
return rc;
}
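As a cross-check of the sizing comment above: 16 kbit of facility bits are 2048 bytes, and the mask plus the list together fill exactly the 4 KiB page allocated with get_zeroed_page(). A standalone illustration (the struct below mirrors the layout described by the comment, not necessarily the kernel's exact definition):

#include <assert.h>
#include <stdint.h>

#define FAC_BITS_MAX   (16 * 1024)		/* architectural maximum: 16 kbit */
#define FAC_LIST_BYTES (FAC_BITS_MAX / 8)	/* 2048 bytes per array */

struct fac_page {				/* stand-in for arch.model.fac */
	uint64_t mask[FAC_LIST_BYTES / sizeof(uint64_t)];
	uint64_t list[FAC_LIST_BYTES / sizeof(uint64_t)];
};

static_assert(sizeof(struct fac_page) == 4096,
	      "facility mask + list fill exactly one 4 KiB page");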
@@ -536,6 +1152,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
void kvm_arch_destroy_vm(struct kvm *kvm)
{
kvm_free_vcpus(kvm);
+ free_page((unsigned long)kvm->arch.model.fac);
free_page((unsigned long)(kvm->arch.sca));
debug_unregister(kvm->arch.dbf);
kfree(kvm->arch.crypto.crycb);
@@ -546,35 +1163,50 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
}
/* Section: vcpu related */
+static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
+ if (!vcpu->arch.gmap)
+ return -ENOMEM;
+ vcpu->arch.gmap->private = vcpu->kvm;
+
+ return 0;
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
kvm_clear_async_pf_completion_queue(vcpu);
- if (kvm_is_ucontrol(vcpu->kvm)) {
- vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
- if (!vcpu->arch.gmap)
- return -ENOMEM;
- vcpu->arch.gmap->private = vcpu->kvm;
- return 0;
- }
-
- vcpu->arch.gmap = vcpu->kvm->arch.gmap;
vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
KVM_SYNC_GPRS |
KVM_SYNC_ACRS |
KVM_SYNC_CRS |
KVM_SYNC_ARCH0 |
KVM_SYNC_PFAULT;
+ if (test_kvm_facility(vcpu->kvm, 129))
+ vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
+
+ if (kvm_is_ucontrol(vcpu->kvm))
+ return __kvm_ucontrol_vcpu_init(vcpu);
+
return 0;
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
- save_fp_regs(vcpu->arch.host_fpregs.fprs);
+ if (test_kvm_facility(vcpu->kvm, 129))
+ save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
+ else
+ save_fp_regs(vcpu->arch.host_fpregs.fprs);
save_access_regs(vcpu->arch.host_acrs);
- restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
- restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
+ if (test_kvm_facility(vcpu->kvm, 129)) {
+ restore_fp_ctl(&vcpu->run->s.regs.fpc);
+ restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+ } else {
+ restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+ restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
+ }
restore_access_regs(vcpu->run->s.regs.acrs);
gmap_enable(vcpu->arch.gmap);
atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
@@ -584,11 +1216,19 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
gmap_disable(vcpu->arch.gmap);
- save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
- save_fp_regs(vcpu->arch.guest_fpregs.fprs);
+ if (test_kvm_facility(vcpu->kvm, 129)) {
+ save_fp_ctl(&vcpu->run->s.regs.fpc);
+ save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+ } else {
+ save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+ save_fp_regs(vcpu->arch.guest_fpregs.fprs);
+ }
save_access_regs(vcpu->run->s.regs.acrs);
restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
- restore_fp_regs(vcpu->arch.host_fpregs.fprs);
+ if (test_kvm_facility(vcpu->kvm, 129))
+ restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
+ else
+ restore_fp_regs(vcpu->arch.host_fpregs.fprs);
restore_access_regs(vcpu->arch.host_acrs);
}
@@ -615,16 +1255,27 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
kvm_s390_clear_local_irqs(vcpu);
}
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
- return 0;
+ mutex_lock(&vcpu->kvm->lock);
+ vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+ mutex_unlock(&vcpu->kvm->lock);
+ if (!kvm_is_ucontrol(vcpu->kvm))
+ vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
- if (!test_vfacility(76))
+ if (!test_kvm_facility(vcpu->kvm, 76))
return;
+ vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
+
+ if (vcpu->kvm->arch.crypto.aes_kw)
+ vcpu->arch.sie_block->ecb3 |= ECB3_AES;
+ if (vcpu->kvm->arch.crypto.dea_kw)
+ vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
+
vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}
@@ -645,6 +1296,15 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
return 0;
}
+static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
+
+ vcpu->arch.cpu_id = model->cpu_id;
+ vcpu->arch.sie_block->ibc = model->ibc;
+ vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
+}
+
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
int rc = 0;
@@ -653,27 +1313,31 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
CPUSTAT_SM |
CPUSTAT_STOPPED |
CPUSTAT_GED);
+ kvm_s390_vcpu_setup_model(vcpu);
+
vcpu->arch.sie_block->ecb = 6;
- if (test_vfacility(50) && test_vfacility(73))
+ if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
vcpu->arch.sie_block->ecb |= 0x10;
vcpu->arch.sie_block->ecb2 = 8;
- vcpu->arch.sie_block->eca = 0xD1002000U;
+ vcpu->arch.sie_block->eca = 0xC1002000U;
if (sclp_has_siif())
vcpu->arch.sie_block->eca |= 1;
- vcpu->arch.sie_block->fac = (int) (long) vfacilities;
- vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
- ICTL_TPROT;
+ if (sclp_has_sigpif())
+ vcpu->arch.sie_block->eca |= 0x10000000U;
+ if (test_kvm_facility(vcpu->kvm, 129)) {
+ vcpu->arch.sie_block->eca |= 0x00020000;
+ vcpu->arch.sie_block->ecd |= 0x20000000;
+ }
+ vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
if (kvm_s390_cmma_enabled(vcpu->kvm)) {
rc = kvm_s390_vcpu_setup_cmma(vcpu);
if (rc)
return rc;
}
- hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
- get_cpu_id(&vcpu->arch.cpu_id);
- vcpu->arch.cpu_id.version = 0xff;
kvm_s390_vcpu_crypto_setup(vcpu);
@@ -702,6 +1366,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.sie_block = &sie_page->sie_block;
vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
+ vcpu->arch.host_vregs = &sie_page->vregs;
vcpu->arch.sie_block->icpua = id;
if (!kvm_is_ucontrol(kvm)) {
@@ -741,7 +1406,7 @@ out:
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
- return kvm_cpu_has_interrupt(vcpu);
+ return kvm_s390_vcpu_has_irq(vcpu, 0);
}
void s390_vcpu_block(struct kvm_vcpu *vcpu)
@@ -869,6 +1534,8 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
case KVM_REG_S390_PFTOKEN:
r = get_user(vcpu->arch.pfault_token,
(u64 __user *)reg->addr);
+ if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
+ kvm_clear_async_pf_completion_queue(vcpu);
break;
case KVM_REG_S390_PFCOMPARE:
r = get_user(vcpu->arch.pfault_compare,
@@ -1176,7 +1843,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
return 0;
if (psw_extint_disabled(vcpu))
return 0;
- if (kvm_cpu_has_interrupt(vcpu))
+ if (kvm_s390_vcpu_has_irq(vcpu, 0))
return 0;
if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
return 0;
@@ -1234,6 +1901,31 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
return 0;
}
+static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
+{
+ psw_t *psw = &vcpu->arch.sie_block->gpsw;
+ u8 opcode;
+ int rc;
+
+ VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+ trace_kvm_s390_sie_fault(vcpu);
+
+ /*
+ * We want to inject an addressing exception, which is defined as a
+ * suppressing or terminating exception. However, since we came here
+ * by a DAT access exception, the PSW still points to the faulting
+ * instruction because DAT exceptions are nullifying. So we have
+ * to look up the current opcode to get the length of the instruction
+ * to be able to forward the PSW.
+ */
+ rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
+ if (rc)
+ return kvm_s390_inject_prog_cond(vcpu, rc);
+ psw->addr = __rewind_psw(*psw, -insn_length(opcode));
+
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+}
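Forwarding the PSW needs the length of the faulting instruction, and on s390 that length is encoded in the two most significant bits of the first opcode byte: 00 means 2 bytes, 01 and 10 mean 4 bytes, 11 means 6 bytes; rewinding by a negative amount, as above, therefore steps the PSW past the instruction. A standalone version of that lookup (insn_length() in the patch is assumed to be equivalent):

#include <stdint.h>

/* s390 instruction length from the first opcode byte (ILC encoding). */
static int insn_length(uint8_t opcode)
{
	switch (opcode >> 6) {
	case 0:  return 2;	/* 00xxxxxx: 2-byte instruction */
	case 1:
	case 2:  return 4;	/* 01xxxxxx / 10xxxxxx: 4-byte instruction */
	default: return 6;	/* 11xxxxxx: 6-byte instruction */
	}
}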
+
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
int rc = -1;
@@ -1265,11 +1957,8 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
}
}
- if (rc == -1) {
- VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
- trace_kvm_s390_sie_fault(vcpu);
- rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- }
+ if (rc == -1)
+ rc = vcpu_post_run_fault_in_sie(vcpu);
memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
@@ -1341,6 +2030,8 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu->arch.pfault_token = kvm_run->s.regs.pft;
vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
+ if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
+ kvm_clear_async_pf_completion_queue(vcpu);
}
kvm_run->kvm_dirty_regs = 0;
}
@@ -1483,6 +2174,35 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
return kvm_s390_store_status_unloaded(vcpu, addr);
}
+/*
+ * store additional status at address
+ */
+int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
+ unsigned long gpa)
+{
+ /* Only bits 0-53 are used for address formation */
+ if (!(gpa & ~0x3ff))
+ return 0;
+
+ return write_guest_abs(vcpu, gpa & ~0x3ff,
+ (void *)&vcpu->run->s.regs.vrs, 512);
+}
+
+int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+ if (!test_kvm_facility(vcpu->kvm, 129))
+ return 0;
+
+ /*
+ * The guest VXRS are in the host VXRS due to the lazy
+ * copying in vcpu load/put. Let's update our copies before we save
+ * them into the save area.
+ */
+ save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+
+ return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
+}
+
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
@@ -1559,15 +2279,10 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
spin_lock(&vcpu->kvm->arch.start_stop_lock);
online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
- /* Need to lock access to action_bits to avoid a SIGP race condition */
- spin_lock(&vcpu->arch.local_int.lock);
- atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
-
/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
- vcpu->arch.local_int.action_bits &=
- ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
- spin_unlock(&vcpu->arch.local_int.lock);
+ kvm_s390_clear_stop_irq(vcpu);
+ atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
__disable_ibs_on_vcpu(vcpu);
for (i = 0; i < online_vcpus; i++) {
@@ -1612,6 +2327,65 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
return r;
}
+static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
+ struct kvm_s390_mem_op *mop)
+{
+ void __user *uaddr = (void __user *)mop->buf;
+ void *tmpbuf = NULL;
+ int r, srcu_idx;
+ const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
+ | KVM_S390_MEMOP_F_CHECK_ONLY;
+
+ if (mop->flags & ~supported_flags)
+ return -EINVAL;
+
+ if (mop->size > MEM_OP_MAX_SIZE)
+ return -E2BIG;
+
+ if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
+ tmpbuf = vmalloc(mop->size);
+ if (!tmpbuf)
+ return -ENOMEM;
+ }
+
+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+ switch (mop->op) {
+ case KVM_S390_MEMOP_LOGICAL_READ:
+ if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
+ r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
+ break;
+ }
+ r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
+ if (r == 0) {
+ if (copy_to_user(uaddr, tmpbuf, mop->size))
+ r = -EFAULT;
+ }
+ break;
+ case KVM_S390_MEMOP_LOGICAL_WRITE:
+ if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
+ r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
+ break;
+ }
+ if (copy_from_user(tmpbuf, uaddr, mop->size)) {
+ r = -EFAULT;
+ break;
+ }
+ r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
+ break;
+ default:
+ r = -EINVAL;
+ }
+
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+
+ if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
+ kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
+
+ vfree(tmpbuf);
+ return r;
+}
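kvm_s390_guest_mem_op() implements the new KVM_S390_MEM_OP vcpu ioctl, with KVM_CAP_S390_MEM_OP reporting the maximum transfer size (65536 bytes here). A rough user-space sketch of a logical read, using only the kvm_s390_mem_op fields this patch dereferences (gaddr, ar, size, flags, op, buf):

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read `size` bytes from guest logical address `gaddr` into `dest`. */
static int guest_read(int vcpu_fd, __u64 gaddr, void *dest, __u64 size)
{
	struct kvm_s390_mem_op op;

	memset(&op, 0, sizeof(op));
	op.gaddr = gaddr;
	op.size  = size;		/* must not exceed the advertised maximum */
	op.op    = KVM_S390_MEMOP_LOGICAL_READ;
	op.buf   = (__u64)(unsigned long)dest;
	op.ar    = 0;			/* access register number; 0 in the common case */

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op) < 0 ? -errno : 0;
}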
+
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -1621,6 +2395,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
long r;
switch (ioctl) {
+ case KVM_S390_IRQ: {
+ struct kvm_s390_irq s390irq;
+
+ r = -EFAULT;
+ if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
+ break;
+ r = kvm_s390_inject_vcpu(vcpu, &s390irq);
+ break;
+ }
case KVM_S390_INTERRUPT: {
struct kvm_s390_interrupt s390int;
struct kvm_s390_irq s390irq;
@@ -1711,6 +2494,47 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
break;
}
+ case KVM_S390_MEM_OP: {
+ struct kvm_s390_mem_op mem_op;
+
+ if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
+ r = kvm_s390_guest_mem_op(vcpu, &mem_op);
+ else
+ r = -EFAULT;
+ break;
+ }
+ case KVM_S390_SET_IRQ_STATE: {
+ struct kvm_s390_irq_state irq_state;
+
+ r = -EFAULT;
+ if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
+ break;
+ if (irq_state.len > VCPU_IRQS_MAX_BUF ||
+ irq_state.len == 0 ||
+ irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
+ r = -EINVAL;
+ break;
+ }
+ r = kvm_s390_set_irq_state(vcpu,
+ (void __user *) irq_state.buf,
+ irq_state.len);
+ break;
+ }
+ case KVM_S390_GET_IRQ_STATE: {
+ struct kvm_s390_irq_state irq_state;
+
+ r = -EFAULT;
+ if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
+ break;
+ if (irq_state.len == 0) {
+ r = -EINVAL;
+ break;
+ }
+ r = kvm_s390_get_irq_state(vcpu,
+ (__u8 __user *) irq_state.buf,
+ irq_state.len);
+ break;
+ }
default:
r = -ENOTTY;
}
@@ -1783,30 +2607,11 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
static int __init kvm_s390_init(void)
{
- int ret;
- ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
- if (ret)
- return ret;
-
- /*
- * guests can ask for up to 255+1 double words, we need a full page
- * to hold the maximum amount of facilities. On the other hand, we
- * only set facilities that are known to work in KVM.
- */
- vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
- if (!vfacilities) {
- kvm_exit();
- return -ENOMEM;
- }
- memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
- vfacilities[0] &= 0xff82fffbf47c2000UL;
- vfacilities[1] &= 0x005c000000000000UL;
- return 0;
+ return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
static void __exit kvm_s390_exit(void)
{
- free_page((unsigned long) vfacilities);
kvm_exit();
}
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index a8f3d9b71c11..ca108b90ae56 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -18,12 +18,10 @@
#include <linux/hrtimer.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+#include <asm/facility.h>
typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
-/* declare vfacilities extern */
-extern unsigned long *vfacilities;
-
/* Transactional Memory Execution related macros */
#define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10))
#define TDB_FORMAT1 1
@@ -72,16 +70,22 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
}
-static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu)
+typedef u8 __bitwise ar_t;
+
+static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
{
u32 base2 = vcpu->arch.sie_block->ipb >> 28;
u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ if (ar)
+ *ar = base2;
+
return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}
static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
- u64 *address1, u64 *address2)
+ u64 *address1, u64 *address2,
+ ar_t *ar_b1, ar_t *ar_b2)
{
u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
@@ -90,6 +94,11 @@ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
*address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
+
+ if (ar_b1)
+ *ar_b1 = base1;
+ if (ar_b2)
+ *ar_b2 = base2;
}
static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
@@ -100,7 +109,7 @@ static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2
*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
}
-static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
+static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
{
u32 base2 = vcpu->arch.sie_block->ipb >> 28;
u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
@@ -109,14 +118,20 @@ static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
if (disp2 & 0x80000)
disp2+=0xfff00000;
+ if (ar)
+ *ar = base2;
+
return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
}
-static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu)
+static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar)
{
u32 base2 = vcpu->arch.sie_block->ipb >> 28;
u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ if (ar)
+ *ar = base2;
+
return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}
@@ -127,6 +142,24 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}
+/* test availability of facility in a kvm instance */
+static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
+{
+ return __test_facility(nr, kvm->arch.model.fac->mask) &&
+ __test_facility(nr, kvm->arch.model.fac->list);
+}
+
+static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
+{
+ unsigned char *ptr;
+
+ if (nr >= MAX_FACILITY_BIT)
+ return -EINVAL;
+ ptr = (unsigned char *) fac_list + (nr >> 3);
+ *ptr |= (0x80UL >> (nr & 7));
+ return 0;
+}
+
/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
@@ -145,9 +178,9 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
struct kvm_s390_irq *irq);
int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
- u64 cr6, u64 schid);
-void kvm_s390_reinject_io_int(struct kvm *kvm,
- struct kvm_s390_interrupt_info *inti);
+ u64 isc_mask, u32 schid);
+int kvm_s390_reinject_io_int(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
/* implemented in intercept.c */
@@ -172,7 +205,10 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
/* implemented in kvm-s390.c */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
+int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
+ unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
+int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void s390_vcpu_block(struct kvm_vcpu *vcpu);
@@ -183,7 +219,8 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
/* is cmma enabled */
bool kvm_s390_cmma_enabled(struct kvm *kvm);
-int test_vfacility(unsigned long nr);
+unsigned long kvm_s390_fac_list_mask_size(void);
+extern unsigned long kvm_s390_fac_list_mask[];
/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
@@ -228,11 +265,17 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
struct kvm_s390_irq *s390irq);
/* implemented in interrupt.c */
-int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
+int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
int psw_extint_disabled(struct kvm_vcpu *vcpu);
void kvm_s390_destroy_adapters(struct kvm *kvm);
-int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu);
+int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
+int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
+void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
+int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
+ void __user *buf, int len);
+int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
+ __u8 __user *buf, int len);
/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
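
A standalone sketch (not part of the patch) of the facility-bit arithmetic used by set_kvm_facility() and test_kvm_facility() above. STFLE numbers bits from the most significant bit of the first byte, hence the 0x80 >> (nr & 7) mask; buffer size and facility number below are arbitrary demo values.

#include <stdio.h>
#include <string.h>

#define DEMO_FAC_BYTES 32	/* 256 facility bits, demo value only */

static void demo_set_facility(unsigned char *fac_list, unsigned long nr)
{
	fac_list[nr >> 3] |= 0x80 >> (nr & 7);	/* bit 0 = MSB of byte 0 */
}

static int demo_test_facility(const unsigned char *fac_list, unsigned long nr)
{
	return !!(fac_list[nr >> 3] & (0x80 >> (nr & 7)));
}

int main(void)
{
	unsigned char fac[DEMO_FAC_BYTES];

	memset(fac, 0, sizeof(fac));
	demo_set_facility(fac, 76);	/* arbitrary facility number for the demo */
	printf("facility 76: %d, facility 77: %d\n",
	       demo_test_facility(fac, 76), demo_test_facility(fac, 77));
	return 0;
}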
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 1be578d64dfc..d22d8ee1ff9d 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -36,15 +36,16 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
struct kvm_vcpu *cpup;
s64 hostclk, val;
int i, rc;
+ ar_t ar;
u64 op2;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- op2 = kvm_s390_get_base_disp_s(vcpu);
+ op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
if (op2 & 7) /* Operand must be on a doubleword boundary */
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = read_guest(vcpu, op2, &val, sizeof(val));
+ rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -68,20 +69,21 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
u64 operand2;
u32 address;
int rc;
+ ar_t ar;
vcpu->stat.instruction_spx++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- operand2 = kvm_s390_get_base_disp_s(vcpu);
+ operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
/* must be word boundary */
if (operand2 & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* get the value */
- rc = read_guest(vcpu, operand2, &address, sizeof(address));
+ rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -107,13 +109,14 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
u64 operand2;
u32 address;
int rc;
+ ar_t ar;
vcpu->stat.instruction_stpx++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- operand2 = kvm_s390_get_base_disp_s(vcpu);
+ operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
/* must be word boundary */
if (operand2 & 3)
@@ -122,7 +125,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
address = kvm_s390_get_prefix(vcpu);
/* get the value */
- rc = write_guest(vcpu, operand2, &address, sizeof(address));
+ rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -136,18 +139,19 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
u16 vcpu_id = vcpu->vcpu_id;
u64 ga;
int rc;
+ ar_t ar;
vcpu->stat.instruction_stap++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_s(vcpu);
+ ga = kvm_s390_get_base_disp_s(vcpu, &ar);
if (ga & 1)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id));
+ rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -207,7 +211,7 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
addr = kvm_s390_logical_to_effective(vcpu, addr);
- if (kvm_s390_check_low_addr_protection(vcpu, addr))
+ if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
addr = kvm_s390_real_to_abs(vcpu, addr);
@@ -229,18 +233,20 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
struct kvm_s390_interrupt_info *inti;
unsigned long len;
u32 tpi_data[3];
- int cc, rc;
+ int rc;
u64 addr;
+ ar_t ar;
- rc = 0;
- addr = kvm_s390_get_base_disp_s(vcpu);
+ addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- cc = 0;
+
inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
- if (!inti)
- goto no_interrupt;
- cc = 1;
+ if (!inti) {
+ kvm_s390_set_psw_cc(vcpu, 0);
+ return 0;
+ }
+
tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
tpi_data[1] = inti->io.io_int_parm;
tpi_data[2] = inti->io.io_int_word;
@@ -250,40 +256,51 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
* provided area.
*/
len = sizeof(tpi_data) - 4;
- rc = write_guest(vcpu, addr, &tpi_data, len);
- if (rc)
- return kvm_s390_inject_prog_cond(vcpu, rc);
+ rc = write_guest(vcpu, addr, ar, &tpi_data, len);
+ if (rc) {
+ rc = kvm_s390_inject_prog_cond(vcpu, rc);
+ goto reinject_interrupt;
+ }
} else {
/*
* Store the three-word I/O interruption code into
* the appropriate lowcore area.
*/
len = sizeof(tpi_data);
- if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
+ if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
+ /* failed writes to the low core are not recoverable */
rc = -EFAULT;
+ goto reinject_interrupt;
+ }
}
+
+ /* irq was successfully handed to the guest */
+ kfree(inti);
+ kvm_s390_set_psw_cc(vcpu, 1);
+ return 0;
+reinject_interrupt:
/*
* If we encounter a problem storing the interruption code, the
* instruction is suppressed from the guest's view: reinject the
* interrupt.
*/
- if (!rc)
+ if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
kfree(inti);
- else
- kvm_s390_reinject_io_int(vcpu->kvm, inti);
-no_interrupt:
- /* Set condition code and we're done. */
- if (!rc)
- kvm_s390_set_psw_cc(vcpu, cc);
+ rc = -EFAULT;
+ }
+ /* don't set the cc, a pgm irq was injected or we drop to user space */
return rc ? -EFAULT : 0;
}
static int handle_tsch(struct kvm_vcpu *vcpu)
{
- struct kvm_s390_interrupt_info *inti;
+ struct kvm_s390_interrupt_info *inti = NULL;
+ const u64 isc_mask = 0xffUL << 24; /* all iscs set */
- inti = kvm_s390_get_io_int(vcpu->kvm, 0,
- vcpu->run->s.regs.gprs[1]);
+ /* a valid schid has at least one bit set */
+ if (vcpu->run->s.regs.gprs[1])
+ inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
+ vcpu->run->s.regs.gprs[1]);
/*
* Prepare exit to userspace.
@@ -337,19 +354,24 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
static int handle_stfl(struct kvm_vcpu *vcpu)
{
int rc;
+ unsigned int fac;
vcpu->stat.instruction_stfl++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+ /*
+ * We need to shift the lower 32 facility bits (bit 0-31) from a u64
+ * into a u32 memory representation. They will remain bits 0-31.
+ */
+ fac = *vcpu->kvm->arch.model.fac->list >> 32;
rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
- vfacilities, 4);
+ &fac, sizeof(fac));
if (rc)
return rc;
- VCPU_EVENT(vcpu, 5, "store facility list value %x",
- *(unsigned int *) vfacilities);
- trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
+ VCPU_EVENT(vcpu, 5, "store facility list value %x", fac);
+ trace_kvm_s390_handle_stfl(vcpu, fac);
return 0;
}
@@ -381,15 +403,16 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
psw_compat_t new_psw;
u64 addr;
int rc;
+ ar_t ar;
if (gpsw->mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- addr = kvm_s390_get_base_disp_s(vcpu);
+ addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
+ rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
if (!(new_psw.mask & PSW32_MASK_BASE))
@@ -407,14 +430,15 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
psw_t new_psw;
u64 addr;
int rc;
+ ar_t ar;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- addr = kvm_s390_get_base_disp_s(vcpu);
+ addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
+ rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu->arch.sie_block->gpsw = new_psw;
@@ -428,18 +452,19 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
u64 stidp_data = vcpu->arch.stidp_data;
u64 operand2;
int rc;
+ ar_t ar;
vcpu->stat.instruction_stidp++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- operand2 = kvm_s390_get_base_disp_s(vcpu);
+ operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
if (operand2 & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data));
+ rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -462,6 +487,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
for (n = mem->count - 1; n > 0 ; n--)
memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
+ memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
mem->vm[0].cpus_total = cpus;
mem->vm[0].cpus_configured = cpus;
mem->vm[0].cpus_standby = 0;
@@ -473,6 +499,17 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
ASCEBC(mem->vm[0].cpi, 16);
}
+static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
+ u8 fc, u8 sel1, u16 sel2)
+{
+ vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
+ vcpu->run->s390_stsi.addr = addr;
+ vcpu->run->s390_stsi.ar = ar;
+ vcpu->run->s390_stsi.fc = fc;
+ vcpu->run->s390_stsi.sel1 = sel1;
+ vcpu->run->s390_stsi.sel2 = sel2;
+}
+
static int handle_stsi(struct kvm_vcpu *vcpu)
{
int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
@@ -481,6 +518,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
unsigned long mem = 0;
u64 operand2;
int rc = 0;
+ ar_t ar;
vcpu->stat.instruction_stsi++;
VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
@@ -503,7 +541,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
return 0;
}
- operand2 = kvm_s390_get_base_disp_s(vcpu);
+ operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
if (operand2 & 0xfff)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -527,16 +565,20 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
break;
}
- rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE);
+ rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
if (rc) {
rc = kvm_s390_inject_prog_cond(vcpu, rc);
goto out;
}
+ if (vcpu->kvm->arch.user_stsi) {
+ insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
+ rc = -EREMOTE;
+ }
trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
free_page(mem);
kvm_s390_set_psw_cc(vcpu, 0);
vcpu->run->s.regs.gprs[0] = 0;
- return 0;
+ return rc;
out_no_data:
kvm_s390_set_psw_cc(vcpu, 3);
out:
@@ -665,7 +707,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
}
if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
- if (kvm_s390_check_low_addr_protection(vcpu, start))
+ if (kvm_s390_check_low_addr_prot_real(vcpu, start))
return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}
@@ -771,13 +813,14 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u32 ctl_array[16];
u64 ga;
+ ar_t ar;
vcpu->stat.instruction_lctl++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_rs(vcpu);
+ ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
if (ga & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -786,7 +829,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
nr_regs = ((reg3 - reg1) & 0xf) + 1;
- rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
+ rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
reg = reg1;
@@ -809,13 +852,14 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u32 ctl_array[16];
u64 ga;
+ ar_t ar;
vcpu->stat.instruction_stctl++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_rs(vcpu);
+ ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
if (ga & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -831,7 +875,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
break;
reg = (reg + 1) % 16;
} while (1);
- rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
+ rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
@@ -842,13 +886,14 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u64 ctl_array[16];
u64 ga;
+ ar_t ar;
vcpu->stat.instruction_lctlg++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_rsy(vcpu);
+ ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
if (ga & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -857,7 +902,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
nr_regs = ((reg3 - reg1) & 0xf) + 1;
- rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
+ rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
reg = reg1;
@@ -879,13 +924,14 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u64 ctl_array[16];
u64 ga;
+ ar_t ar;
vcpu->stat.instruction_stctg++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_rsy(vcpu);
+ ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
if (ga & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -901,7 +947,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
break;
reg = (reg + 1) % 16;
} while (1);
- rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
+ rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
@@ -926,13 +972,14 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
unsigned long hva, gpa;
int ret = 0, cc = 0;
bool writable;
+ ar_t ar;
vcpu->stat.instruction_tprot++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);
+ kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);
/* we only handle the Linux memory detection case:
* access key == 0
@@ -941,11 +988,11 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
ipte_lock(vcpu);
- ret = guest_translate_address(vcpu, address1, &gpa, 1);
+ ret = guest_translate_address(vcpu, address1, ar, &gpa, 1);
if (ret == PGM_PROTECTION) {
/* Write protected? Try again with read-only... */
cc = 1;
- ret = guest_translate_address(vcpu, address1, &gpa, 0);
+ ret = guest_translate_address(vcpu, address1, ar, &gpa, 0);
}
if (ret) {
if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
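
A small sketch (not part of the patch) of the bit 0-31 extraction done by handle_stfl() above. On s390 (big endian) the architected facility bits 0-31 sit in the upper half of the first u64 of the facility list, so shifting right by 32 yields exactly the u32 image that STFL stores in the low-core stfl_fac_list word; the bit pattern below is a demo value.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* demo value only: pretend facility bits 0, 1 and 33 are set */
	uint64_t fac_list0 = (1ULL << 63) | (1ULL << 62) | (1ULL << 30);
	uint32_t stfl_word = fac_list0 >> 32;	/* facility bits 0-31 only */

	printf("STFL word: %08x\n", stfl_word);	/* prints c0000000 */
	return 0;
}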
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 6651f9f73973..72e58bd2bee7 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -26,15 +26,17 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
struct kvm_s390_local_interrupt *li;
int cpuflags;
int rc;
+ int ext_call_pending;
li = &dst_vcpu->arch.local_int;
cpuflags = atomic_read(li->cpuflags);
- if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
+ ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
+ if (!(cpuflags & CPUSTAT_STOPPED) && !ext_call_pending)
rc = SIGP_CC_ORDER_CODE_ACCEPTED;
else {
*reg &= 0xffffffff00000000UL;
- if (cpuflags & CPUSTAT_ECALL_PEND)
+ if (ext_call_pending)
*reg |= SIGP_STATUS_EXT_CALL_PENDING;
if (cpuflags & CPUSTAT_STOPPED)
*reg |= SIGP_STATUS_STOPPED;
@@ -96,7 +98,7 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
}
static int __sigp_external_call(struct kvm_vcpu *vcpu,
- struct kvm_vcpu *dst_vcpu)
+ struct kvm_vcpu *dst_vcpu, u64 *reg)
{
struct kvm_s390_irq irq = {
.type = KVM_S390_INT_EXTERNAL_CALL,
@@ -105,45 +107,31 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu,
int rc;
rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
- if (!rc)
+ if (rc == -EBUSY) {
+ *reg &= 0xffffffff00000000UL;
+ *reg |= SIGP_STATUS_EXT_CALL_PENDING;
+ return SIGP_CC_STATUS_STORED;
+ } else if (rc == 0) {
VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
dst_vcpu->vcpu_id);
-
- return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
-}
-
-static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
-{
- struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
- int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
-
- spin_lock(&li->lock);
- if (li->action_bits & ACTION_STOP_ON_STOP) {
- /* another SIGP STOP is pending */
- rc = SIGP_CC_BUSY;
- goto out;
- }
- if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
- if ((action & ACTION_STORE_ON_STOP) != 0)
- rc = -ESHUTDOWN;
- goto out;
}
- set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
- li->action_bits |= action;
- atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
- kvm_s390_vcpu_wakeup(dst_vcpu);
-out:
- spin_unlock(&li->lock);
- return rc;
+ return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}
static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_SIGP_STOP,
+ };
int rc;
- rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP);
- VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id);
+ rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
+ if (rc == -EBUSY)
+ rc = SIGP_CC_BUSY;
+ else if (rc == 0)
+ VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x",
+ dst_vcpu->vcpu_id);
return rc;
}
@@ -151,20 +139,18 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
struct kvm_vcpu *dst_vcpu, u64 *reg)
{
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_SIGP_STOP,
+ .u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS,
+ };
int rc;
- rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP |
- ACTION_STORE_ON_STOP);
- VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
- dst_vcpu->vcpu_id);
-
- if (rc == -ESHUTDOWN) {
- /* If the CPU has already been stopped, we still have
- * to save the status when doing stop-and-store. This
- * has to be done after unlocking all spinlocks. */
- rc = kvm_s390_store_status_unloaded(dst_vcpu,
- KVM_S390_STORE_STATUS_NOADDR);
- }
+ rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
+ if (rc == -EBUSY)
+ rc = SIGP_CC_BUSY;
+ else if (rc == 0)
+ VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
+ dst_vcpu->vcpu_id);
return rc;
}
@@ -197,41 +183,33 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
u32 address, u64 *reg)
{
- struct kvm_s390_local_interrupt *li;
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_SIGP_SET_PREFIX,
+ .u.prefix.address = address & 0x7fffe000u,
+ };
int rc;
- li = &dst_vcpu->arch.local_int;
-
/*
* Make sure the new value is valid memory. We only need to check the
* first page, since address is 8k aligned and memory pieces are always
* at least 1MB aligned and have at least a size of 1MB.
*/
- address &= 0x7fffe000u;
- if (kvm_is_error_gpa(vcpu->kvm, address)) {
+ if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INVALID_PARAMETER;
return SIGP_CC_STATUS_STORED;
}
- spin_lock(&li->lock);
- /* cpu must be in stopped state */
- if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
+ rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
+ if (rc == -EBUSY) {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INCORRECT_STATE;
- rc = SIGP_CC_STATUS_STORED;
- goto out_li;
+ return SIGP_CC_STATUS_STORED;
+ } else if (rc == 0) {
+ VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x",
+ dst_vcpu->vcpu_id, irq.u.prefix.address);
}
- li->irq.prefix.address = address;
- set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
- kvm_s390_vcpu_wakeup(dst_vcpu);
- rc = SIGP_CC_ORDER_CODE_ACCEPTED;
-
- VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", dst_vcpu->vcpu_id,
- address);
-out_li:
- spin_unlock(&li->lock);
return rc;
}
@@ -242,9 +220,7 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
int flags;
int rc;
- spin_lock(&dst_vcpu->arch.local_int.lock);
flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
- spin_unlock(&dst_vcpu->arch.local_int.lock);
if (!(flags & CPUSTAT_STOPPED)) {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -291,8 +267,9 @@ static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
/* handle (RE)START in user space */
int rc = -EOPNOTSUPP;
+ /* make sure we don't race with STOP irq injection */
spin_lock(&li->lock);
- if (li->action_bits & ACTION_STOP_ON_STOP)
+ if (kvm_s390_is_stop_irq_pending(dst_vcpu))
rc = SIGP_CC_BUSY;
spin_unlock(&li->lock);
@@ -333,7 +310,7 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
break;
case SIGP_EXTERNAL_CALL:
vcpu->stat.instruction_sigp_external_call++;
- rc = __sigp_external_call(vcpu, dst_vcpu);
+ rc = __sigp_external_call(vcpu, dst_vcpu, status_reg);
break;
case SIGP_EMERGENCY_SIGNAL:
vcpu->stat.instruction_sigp_emergency++;
@@ -394,6 +371,56 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
return rc;
}
+static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
+{
+ if (!vcpu->kvm->arch.user_sigp)
+ return 0;
+
+ switch (order_code) {
+ case SIGP_SENSE:
+ case SIGP_EXTERNAL_CALL:
+ case SIGP_EMERGENCY_SIGNAL:
+ case SIGP_COND_EMERGENCY_SIGNAL:
+ case SIGP_SENSE_RUNNING:
+ return 0;
+ /* update counters as we're directly dropping to user space */
+ case SIGP_STOP:
+ vcpu->stat.instruction_sigp_stop++;
+ break;
+ case SIGP_STOP_AND_STORE_STATUS:
+ vcpu->stat.instruction_sigp_stop_store_status++;
+ break;
+ case SIGP_STORE_STATUS_AT_ADDRESS:
+ vcpu->stat.instruction_sigp_store_status++;
+ break;
+ case SIGP_STORE_ADDITIONAL_STATUS:
+ vcpu->stat.instruction_sigp_store_adtl_status++;
+ break;
+ case SIGP_SET_PREFIX:
+ vcpu->stat.instruction_sigp_prefix++;
+ break;
+ case SIGP_START:
+ vcpu->stat.instruction_sigp_start++;
+ break;
+ case SIGP_RESTART:
+ vcpu->stat.instruction_sigp_restart++;
+ break;
+ case SIGP_INITIAL_CPU_RESET:
+ vcpu->stat.instruction_sigp_init_cpu_reset++;
+ break;
+ case SIGP_CPU_RESET:
+ vcpu->stat.instruction_sigp_cpu_reset++;
+ break;
+ default:
+ vcpu->stat.instruction_sigp_unknown++;
+ }
+
+ VCPU_EVENT(vcpu, 4, "sigp order %u: completely handled in user space",
+ order_code);
+
+ return 1;
+}
+
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
@@ -407,7 +434,9 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- order_code = kvm_s390_get_base_disp_rs(vcpu);
+ order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
+ if (handle_sigp_order_in_user_space(vcpu, order_code))
+ return -EOPNOTSUPP;
if (r1 % 2)
parameter = vcpu->run->s.regs.gprs[r1];
@@ -447,7 +476,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
int r3 = vcpu->arch.sie_block->ipa & 0x000f;
u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
struct kvm_vcpu *dest_vcpu;
- u8 order_code = kvm_s390_get_base_disp_rs(vcpu);
+ u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
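
A hedged user-space sketch (not part of the patch) of switching on the user_sigp behaviour referenced by handle_sigp_order_in_user_space() above. It assumes the capability is exposed as KVM_CAP_S390_USER_SIGP and enabled per VM with KVM_ENABLE_CAP; once enabled, only SENSE, EXTERNAL CALL, EMERGENCY SIGNAL, CONDITIONAL EMERGENCY SIGNAL and SENSE RUNNING STATUS are still resolved in the kernel, every other order drops to user space via -EOPNOTSUPP.

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int enable_user_sigp(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_S390_USER_SIGP;	/* capability name assumed from this series */

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);	/* 0 on success */
}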
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 647e9d6a4818..653a7ec09ef5 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -209,19 +209,21 @@ TRACE_EVENT(kvm_s390_request_resets,
* Trace point for a vcpu's stop requests.
*/
TRACE_EVENT(kvm_s390_stop_request,
- TP_PROTO(unsigned int action_bits),
- TP_ARGS(action_bits),
+ TP_PROTO(unsigned char stop_irq, unsigned char flags),
+ TP_ARGS(stop_irq, flags),
TP_STRUCT__entry(
- __field(unsigned int, action_bits)
+ __field(unsigned char, stop_irq)
+ __field(unsigned char, flags)
),
TP_fast_assign(
- __entry->action_bits = action_bits;
+ __entry->stop_irq = stop_irq;
+ __entry->flags = flags;
),
- TP_printk("stop request, action_bits = %08x",
- __entry->action_bits)
+ TP_printk("stop request, stop irq = %u, flags = %08x",
+ __entry->stop_irq, __entry->flags)
);
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 034a35a3e9c1..d6c9991f7797 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -12,7 +12,15 @@
#include <linux/smp.h>
#include <asm/io.h>
-int spin_retry = 1000;
+int spin_retry = -1;
+
+static int __init spin_retry_init(void)
+{
+ if (spin_retry < 0)
+ spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
+ return 0;
+}
+early_initcall(spin_retry_init);
/**
* spin_retry= parameter
@@ -24,6 +32,11 @@ static int __init spin_retry_setup(char *str)
}
__setup("spin_retry=", spin_retry_setup);
+static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
+{
+ asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
+}
+
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
unsigned int cpu = SPINLOCK_LOCKVAL;
@@ -46,6 +59,8 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
/* Loop for a while on the lock value. */
count = spin_retry;
do {
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&lp->lock, owner);
owner = ACCESS_ONCE(lp->lock);
} while (owner && count-- > 0);
if (!owner)
@@ -84,6 +99,8 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
/* Loop for a while on the lock value. */
count = spin_retry;
do {
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&lp->lock, owner);
owner = ACCESS_ONCE(lp->lock);
} while (owner && count-- > 0);
if (!owner)
@@ -100,11 +117,19 @@ EXPORT_SYMBOL(arch_spin_lock_wait_flags);
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
+ unsigned int cpu = SPINLOCK_LOCKVAL;
+ unsigned int owner;
int count;
- for (count = spin_retry; count > 0; count--)
- if (arch_spin_trylock_once(lp))
- return 1;
+ for (count = spin_retry; count > 0; count--) {
+ owner = ACCESS_ONCE(lp->lock);
+ /* Try to get the lock if it is free. */
+ if (!owner) {
+ if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+ return 1;
+ } else if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&lp->lock, owner);
+ }
return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
@@ -126,8 +151,11 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
}
old = ACCESS_ONCE(rw->lock);
owner = ACCESS_ONCE(rw->owner);
- if ((int) old < 0)
+ if ((int) old < 0) {
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&rw->lock, old);
continue;
+ }
if (_raw_compare_and_swap(&rw->lock, old, old + 1))
return;
}
@@ -141,8 +169,11 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
while (count-- > 0) {
old = ACCESS_ONCE(rw->lock);
- if ((int) old < 0)
+ if ((int) old < 0) {
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&rw->lock, old);
continue;
+ }
if (_raw_compare_and_swap(&rw->lock, old, old + 1))
return 1;
}
@@ -173,6 +204,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
}
if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
break;
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&rw->lock, old);
}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
@@ -201,6 +234,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
smp_rmb();
if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
break;
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&rw->lock, old);
}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
@@ -214,8 +249,11 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
while (count-- > 0) {
old = ACCESS_ONCE(rw->lock);
- if (old)
+ if (old) {
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&rw->lock, old);
continue;
+ }
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
return 1;
}
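
A standalone model (not part of the patch) of the spin_retry initialisation order introduced above: spin_retry starts at -1, and the early initcall only picks the machine-dependent default (10 with compare-and-delay, 1000 without) when no spin_retry= boot parameter has set it first.

#include <stdio.h>
#include <stdlib.h>

static int spin_retry = -1;	/* -1 = "not set on the command line" */

static void parse_boot_param(const char *arg)	/* models __setup("spin_retry=") */
{
	if (arg)
		spin_retry = atoi(arg);
}

static void spin_retry_init(int machine_has_cad)	/* models the early_initcall */
{
	if (spin_retry < 0)
		spin_retry = machine_has_cad ? 10 : 1000;
}

int main(int argc, char **argv)
{
	parse_boot_param(argc > 1 ? argv[1] : NULL);
	spin_retry_init(1);	/* assume a machine with compare-and-delay */
	printf("spin_retry = %d\n", spin_retry);
	return 0;
}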
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 9065d5aa3932..3ff86533f7db 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -171,7 +171,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
table = table + ((address >> 20) & 0x7ff);
if (bad_address(table))
goto bad;
- pr_cont(KERN_CONT "S:%016lx ", *table);
+ pr_cont("S:%016lx ", *table);
if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
goto out;
table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
@@ -261,7 +261,7 @@ static inline void report_user_fault(struct pt_regs *regs, long signr)
return;
if (!printk_ratelimit())
return;
- printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d",
+ printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
regs->int_code & 0xffff, regs->int_code >> 17);
print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
printk(KERN_CONT "\n");
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 639fce464008..5c586c78ca8d 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -235,10 +235,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
/* Try to get the remaining pages with get_user_pages */
start += nr << PAGE_SHIFT;
pages += nr;
- down_read(&mm->mmap_sem);
- ret = get_user_pages(current, mm, start,
- nr_pages - nr, write, 0, pages, NULL);
- up_read(&mm->mmap_sem);
+ ret = get_user_pages_unlocked(current, mm, start,
+ nr_pages - nr, write, 0, pages);
/* Have to be a bit careful with return values */
if (nr > 0)
ret = (ret < 0) ? nr : ret + nr;
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 3c80d2e38f03..210ffede0153 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -192,12 +192,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
return 0;
}
-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
- int write)
-{
- return ERR_PTR(-EINVAL);
-}
-
int pmd_huge(pmd_t pmd)
{
if (!MACHINE_HAS_HPAGE)
@@ -210,17 +204,3 @@ int pud_huge(pud_t pud)
{
return 0;
}
-
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmdp, int write)
-{
- struct page *page;
-
- if (!MACHINE_HAS_HPAGE)
- return NULL;
-
- page = pmd_page(*pmdp);
- if (page)
- page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
- return page;
-}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c7235e01fd67..d35b15113b17 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -71,13 +71,16 @@ static void __init setup_zero_pages(void)
break;
case 0x2827: /* zEC12 */
case 0x2828: /* zEC12 */
- default:
order = 5;
break;
+ case 0x2964: /* z13 */
+ default:
+ order = 7;
+ break;
}
/* Limit number of empty zero pages for small memory sizes */
- if (order > 2 && totalram_pages <= 16384)
- order = 2;
+ while (order > 2 && (totalram_pages >> 10) < (1UL << order))
+ order--;
empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!empty_zero_page)
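
A standalone model (not part of the patch) of the zero-page scaling loop above. Starting from the per-machine default (order 7 on z13, i.e. 128 zero pages), the order is reduced until the zero pages consume at most roughly 1/1024 of available memory, but never below order 2.

#include <stdio.h>

static unsigned int zero_page_order(unsigned long totalram_pages,
				    unsigned int order)
{
	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
		order--;
	return order;
}

int main(void)
{
	/* demo inputs: 128 MiB and 16 GiB worth of 4 KiB pages */
	printf("128M: order %u\n", zero_page_order(32768, 7));
	printf("16G:  order %u\n", zero_page_order(4194304, 7));
	return 0;
}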
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 9b436c21195e..179a2c20b01f 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -28,8 +28,12 @@
#include <linux/module.h>
#include <linux/random.h>
#include <linux/compat.h>
+#include <linux/security.h>
#include <asm/pgalloc.h>
+unsigned long mmap_rnd_mask;
+unsigned long mmap_align_mask;
+
static unsigned long stack_maxrandom_size(void)
{
if (!(current->flags & PF_RANDOMIZE))
@@ -60,8 +64,10 @@ static unsigned long mmap_rnd(void)
{
if (!(current->flags & PF_RANDOMIZE))
return 0;
- /* 8MB randomization for mmap_base */
- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+ if (is_32bit_task())
+ return (get_random_int() & 0x7ff) << PAGE_SHIFT;
+ else
+ return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
}
static unsigned long mmap_base_legacy(void)
@@ -81,6 +87,109 @@ static inline unsigned long mmap_base(void)
return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
}
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ struct vm_unmapped_area_info info;
+ int do_color_align;
+
+ if (len > TASK_SIZE - mmap_min_addr)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED)
+ return addr;
+
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = !is_32bit_task();
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = mm->mmap_base;
+ info.high_limit = TASK_SIZE;
+ info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+ return vm_unmapped_area(&info);
+}
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ const unsigned long len, const unsigned long pgoff,
+ const unsigned long flags)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
+ struct vm_unmapped_area_info info;
+ int do_color_align;
+
+ /* requested length too big for entire address space */
+ if (len > TASK_SIZE - mmap_min_addr)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED)
+ return addr;
+
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = !is_32bit_task();
+
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.length = len;
+ info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+ info.high_limit = mm->mmap_base;
+ info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+ addr = vm_unmapped_area(&info);
+
+ /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+ if (addr & ~PAGE_MASK) {
+ VM_BUG_ON(addr != -ENOMEM);
+ info.flags = 0;
+ info.low_limit = TASK_UNMAPPED_BASE;
+ info.high_limit = TASK_SIZE;
+ addr = vm_unmapped_area(&info);
+ }
+
+ return addr;
+}
+
+unsigned long randomize_et_dyn(void)
+{
+ unsigned long base;
+
+ base = STACK_TOP / 3 * 2;
+ if (!is_32bit_task())
+ /* Align to 4GB */
+ base &= ~((1UL << 32) - 1);
+ return base + mmap_rnd();
+}
+
#ifndef CONFIG_64BIT
/*
@@ -177,4 +286,36 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
}
}
+static int __init setup_mmap_rnd(void)
+{
+ struct cpuid cpu_id;
+
+ get_cpu_id(&cpu_id);
+ switch (cpu_id.machine) {
+ case 0x9672:
+ case 0x2064:
+ case 0x2066:
+ case 0x2084:
+ case 0x2086:
+ case 0x2094:
+ case 0x2096:
+ case 0x2097:
+ case 0x2098:
+ case 0x2817:
+ case 0x2818:
+ case 0x2827:
+ case 0x2828:
+ mmap_rnd_mask = 0x7ffUL;
+ mmap_align_mask = 0UL;
+ break;
+ case 0x2964: /* z13 */
+ default:
+ mmap_rnd_mask = 0x3ff80UL;
+ mmap_align_mask = 0x7fUL;
+ break;
+ }
+ return 0;
+}
+early_initcall(setup_mmap_rnd);
+
#endif
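
A standalone helper (not part of the patch) that prints the mmap_base randomisation span implied by the masks selected in setup_mmap_rnd() above: 0x7ff pages (about 8 MiB) on older machines, 0x3ff80 pages (about 1 GiB, in 512 KiB steps that match the 0x7f colouring mask) on z13 and later.

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12	/* 4 KiB pages */

static void show_span(const char *name, unsigned long rnd_mask)
{
	unsigned long max_off = rnd_mask << DEMO_PAGE_SHIFT;

	printf("%s: mmap_base randomised over %lu KiB\n", name, max_off >> 10);
}

int main(void)
{
	show_span("pre-z13", 0x7ffUL);
	show_span("z13+   ", 0x3ff80UL);
	return 0;
}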
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 3cf8cc03fff6..b2c1542f2ba2 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -527,7 +527,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
table += (gaddr >> 53) & 0x7ff;
if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
- gaddr & 0xffe0000000000000))
+ gaddr & 0xffe0000000000000UL))
return -ENOMEM;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
}
@@ -535,7 +535,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
table += (gaddr >> 42) & 0x7ff;
if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
- gaddr & 0xfffffc0000000000))
+ gaddr & 0xfffffc0000000000UL))
return -ENOMEM;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
}
@@ -543,7 +543,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
table += (gaddr >> 31) & 0x7ff;
if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
- gaddr & 0xffffffff80000000))
+ gaddr & 0xffffffff80000000UL))
return -ENOMEM;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
}
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 3290f11ae1d9..b2c76f64c530 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -259,7 +259,10 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
}
/* Create a virtual mapping cookie for a PCI BAR */
-void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
+void __iomem *pci_iomap_range(struct pci_dev *pdev,
+ int bar,
+ unsigned long offset,
+ unsigned long max)
{
struct zpci_dev *zdev = get_zdev(pdev);
u64 addr;
@@ -270,14 +273,27 @@ void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
idx = zdev->bars[bar].map_idx;
spin_lock(&zpci_iomap_lock);
- zpci_iomap_start[idx].fh = zdev->fh;
- zpci_iomap_start[idx].bar = bar;
+ if (zpci_iomap_start[idx].count++) {
+ BUG_ON(zpci_iomap_start[idx].fh != zdev->fh ||
+ zpci_iomap_start[idx].bar != bar);
+ } else {
+ zpci_iomap_start[idx].fh = zdev->fh;
+ zpci_iomap_start[idx].bar = bar;
+ }
+ /* Detect overrun */
+ BUG_ON(!zpci_iomap_start[idx].count);
spin_unlock(&zpci_iomap_lock);
addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
- return (void __iomem *) addr;
+ return (void __iomem *) addr + offset;
+}
+EXPORT_SYMBOL(pci_iomap_range);
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return pci_iomap_range(dev, bar, 0, maxlen);
}
-EXPORT_SYMBOL_GPL(pci_iomap);
+EXPORT_SYMBOL(pci_iomap);
void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
@@ -285,11 +301,15 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
spin_lock(&zpci_iomap_lock);
- zpci_iomap_start[idx].fh = 0;
- zpci_iomap_start[idx].bar = 0;
+ /* Detect underrun */
+ BUG_ON(!zpci_iomap_start[idx].count);
+ if (!--zpci_iomap_start[idx].count) {
+ zpci_iomap_start[idx].fh = 0;
+ zpci_iomap_start[idx].bar = 0;
+ }
spin_unlock(&zpci_iomap_lock);
}
-EXPORT_SYMBOL_GPL(pci_iounmap);
+EXPORT_SYMBOL(pci_iounmap);
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
@@ -463,9 +483,8 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
}
-static void zpci_map_resources(struct zpci_dev *zdev)
+static void zpci_map_resources(struct pci_dev *pdev)
{
- struct pci_dev *pdev = zdev->pdev;
resource_size_t len;
int i;
@@ -479,9 +498,8 @@ static void zpci_map_resources(struct zpci_dev *zdev)
}
}
-static void zpci_unmap_resources(struct zpci_dev *zdev)
+static void zpci_unmap_resources(struct pci_dev *pdev)
{
- struct pci_dev *pdev = zdev->pdev;
resource_size_t len;
int i;
@@ -631,7 +649,7 @@ int pcibios_add_device(struct pci_dev *pdev)
zdev->pdev = pdev;
pdev->dev.groups = zpci_attr_groups;
- zpci_map_resources(zdev);
+ zpci_map_resources(pdev);
for (i = 0; i < PCI_BAR_COUNT; i++) {
res = &pdev->resource[i];
@@ -643,6 +661,11 @@ int pcibios_add_device(struct pci_dev *pdev)
return 0;
}
+void pcibios_release_device(struct pci_dev *pdev)
+{
+ zpci_unmap_resources(pdev);
+}
+
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
struct zpci_dev *zdev = get_zdev(pdev);
@@ -650,7 +673,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask)
zdev->pdev = pdev;
zpci_debug_init_device(zdev);
zpci_fmb_enable_device(zdev);
- zpci_map_resources(zdev);
return pci_enable_resources(pdev, mask);
}
@@ -659,7 +681,6 @@ void pcibios_disable_device(struct pci_dev *pdev)
{
struct zpci_dev *zdev = get_zdev(pdev);
- zpci_unmap_resources(zdev);
zpci_fmb_disable_device(zdev);
zpci_debug_exit_device(zdev);
zdev->pdev = NULL;
@@ -668,7 +689,8 @@ void pcibios_disable_device(struct pci_dev *pdev)
#ifdef CONFIG_HIBERNATE_CALLBACKS
static int zpci_restore(struct device *dev)
{
- struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct zpci_dev *zdev = get_zdev(pdev);
int ret = 0;
if (zdev->state != ZPCI_FN_STATE_ONLINE)
@@ -678,7 +700,7 @@ static int zpci_restore(struct device *dev)
if (ret)
goto out;
- zpci_map_resources(zdev);
+ zpci_map_resources(pdev);
zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
zdev->start_dma + zdev->iommu_size - 1,
(u64) zdev->dma_table);
@@ -689,12 +711,14 @@ out:
static int zpci_freeze(struct device *dev)
{
- struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct zpci_dev *zdev = get_zdev(pdev);
if (zdev->state != ZPCI_FN_STATE_ONLINE)
return 0;
zpci_unregister_ioat(zdev, 0);
+ zpci_unmap_resources(pdev);
return clp_disable_fh(zdev);
}
@@ -756,8 +780,8 @@ static int zpci_scan_bus(struct zpci_dev *zdev)
zpci_cleanup_bus_resources(zdev);
return -EIO;
}
-
zdev->bus->max_bus_speed = zdev->max_bus_speed;
+ pci_bus_add_devices(zdev->bus);
return 0;
}
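
A hedged driver-side sketch (not part of the patch) of the pci_iomap_range() entry point exported above; the device, BAR number and register offset are made up for illustration. The point is that repeated mappings of the same BAR now share one zpci_iomap_start slot through the added reference count, released again by pci_iounmap().

#include <linux/pci.h>
#include <linux/io.h>

static int demo_map_ctrl_regs(struct pci_dev *pdev)
{
	void __iomem *regs;

	/* map 0x100 bytes starting at offset 0x1000 of BAR 0 (demo values) */
	regs = pci_iomap_range(pdev, 0, 0x1000, 0x100);
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs);		/* touch the first register */
	pci_iounmap(pdev, regs);	/* drops the per-BAR mapping count */
	return 0;
}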
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 62c5ea6d8682..b1bb2b72302c 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -55,7 +55,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
ret = get_pfn(mmio_addr, VM_WRITE, &pfn);
if (ret)
goto out;
- io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
+ io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
ret = -EFAULT;
if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
@@ -64,8 +64,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
if (copy_from_user(buf, user_buffer, length))
goto out;
- memcpy_toio(io_addr, buf, length);
- ret = 0;
+ ret = zpci_memcpy_toio(io_addr, buf, length);
out:
if (buf != local_buf)
kfree(buf);
@@ -96,18 +95,18 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
ret = get_pfn(mmio_addr, VM_READ, &pfn);
if (ret)
goto out;
- io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
+ io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
- ret = -EFAULT;
- if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
+ if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
+ ret = -EFAULT;
goto out;
-
- memcpy_fromio(buf, io_addr, length);
-
- if (copy_to_user(user_buffer, buf, length))
+ }
+ ret = zpci_memcpy_fromio(buf, io_addr, length);
+ if (ret)
goto out;
+ if (copy_to_user(user_buffer, buf, length))
+ ret = -EFAULT;
- ret = 0;
out:
if (buf != local_buf)
kfree(buf);
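
A hedged user-space sketch (not part of the patch) of the MMIO syscalls reworked above. mmio_addr must point into a PCI BAR mapping of the calling process; the kernel resolves it and issues zpci_memcpy_toio()/zpci_memcpy_fromio() on the caller's behalf. The __NR_s390_pci_mmio_* names are assumed to be provided by the installed unistd headers, and the 32-bit access width is a demo choice.

#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static long pci_mmio_write32(volatile void *mmio_addr, uint32_t val)
{
	return syscall(__NR_s390_pci_mmio_write, (unsigned long) mmio_addr,
		       &val, sizeof(val));
}

static long pci_mmio_read32(volatile void *mmio_addr, uint32_t *val)
{
	return syscall(__NR_s390_pci_mmio_read, (unsigned long) mmio_addr,
		       val, sizeof(val));
}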