author    Linus Torvalds <torvalds@linux-foundation.org> 2008-12-28 23:33:21 +0300
committer Linus Torvalds <torvalds@linux-foundation.org> 2008-12-28 23:33:21 +0300
commit    1db2a5c11e495366bff35cf7445d494703f7febe (patch)
tree      3347dd1cab0a2a96a4333524298a62132eb22336
parent    a39b863342b8aba52390092be95db58f6ed56061 (diff)
parent    cef7125def4dd104769f400c941199614da0aca1 (diff)
download  linux-1db2a5c11e495366bff35cf7445d494703f7febe.tar.xz
Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6: (85 commits)
  [S390] provide documentation for hvc_iucv kernel parameter.
  [S390] convert ctcm printks to dev_xxx and pr_xxx macros.
  [S390] convert zfcp printks to pr_xxx macros.
  [S390] convert vmlogrdr printks to pr_xxx macros.
  [S390] convert zfcp dumper printks to pr_xxx macros.
  [S390] convert cpu related printks to pr_xxx macros.
  [S390] convert qeth printks to dev_xxx and pr_xxx macros.
  [S390] convert sclp printks to pr_xxx macros.
  [S390] convert iucv printks to dev_xxx and pr_xxx macros.
  [S390] convert ap_bus printks to pr_xxx macros.
  [S390] convert dcssblk and extmem printks messages to pr_xxx macros.
  [S390] convert monwriter printks to pr_xxx macros.
  [S390] convert s390 debug feature printks to pr_xxx macros.
  [S390] convert monreader printks to pr_xxx macros.
  [S390] convert appldata printks to pr_xxx macros.
  [S390] convert setup printks to pr_xxx macros.
  [S390] convert hypfs printks to pr_xxx macros.
  [S390] convert time printks to pr_xxx macros.
  [S390] convert cpacf printks to pr_xxx macros.
  [S390] convert cio printks to pr_xxx macros.
  ...
-rw-r--r--  Documentation/kernel-parameters.txt  11
-rw-r--r--  arch/powerpc/include/asm/elf.h  2
-rw-r--r--  arch/powerpc/kernel/vdso.c  3
-rw-r--r--  arch/s390/Kconfig  41
-rw-r--r--  arch/s390/Makefile  1
-rw-r--r--  arch/s390/appldata/appldata.h  4
-rw-r--r--  arch/s390/appldata/appldata_base.c  12
-rw-r--r--  arch/s390/appldata/appldata_os.c  21
-rw-r--r--  arch/s390/crypto/aes_s390.c  14
-rw-r--r--  arch/s390/hypfs/hypfs_diag.c  10
-rw-r--r--  arch/s390/hypfs/inode.c  14
-rw-r--r--  arch/s390/include/asm/auxvec.h  2
-rw-r--r--  arch/s390/include/asm/bug.h  5
-rw-r--r--  arch/s390/include/asm/byteorder.h  72
-rw-r--r--  arch/s390/include/asm/elf.h  16
-rw-r--r--  arch/s390/include/asm/fcx.h  4
-rw-r--r--  arch/s390/include/asm/ftrace.h  8
-rw-r--r--  arch/s390/include/asm/isc.h  1
-rw-r--r--  arch/s390/include/asm/mmu.h  1
-rw-r--r--  arch/s390/include/asm/page.h  2
-rw-r--r--  arch/s390/include/asm/pgalloc.h  5
-rw-r--r--  arch/s390/include/asm/processor.h  4
-rw-r--r--  arch/s390/include/asm/qdio.h  16
-rw-r--r--  arch/s390/include/asm/sigp.h  1
-rw-r--r--  arch/s390/include/asm/smp.h  5
-rw-r--r--  arch/s390/include/asm/sysinfo.h  11
-rw-r--r--  arch/s390/include/asm/system.h  24
-rw-r--r--  arch/s390/include/asm/timer.h  9
-rw-r--r--  arch/s390/include/asm/vdso.h  39
-rw-r--r--  arch/s390/kernel/Makefile  18
-rw-r--r--  arch/s390/kernel/asm-offsets.c  15
-rw-r--r--  arch/s390/kernel/cpcmd.c  7
-rw-r--r--  arch/s390/kernel/debug.c  39
-rw-r--r--  arch/s390/kernel/entry.S  58
-rw-r--r--  arch/s390/kernel/entry64.S  58
-rw-r--r--  arch/s390/kernel/head.S  49
-rw-r--r--  arch/s390/kernel/head31.S  28
-rw-r--r--  arch/s390/kernel/head64.S  24
-rw-r--r--  arch/s390/kernel/mcount.S  56
-rw-r--r--  arch/s390/kernel/processor.c  98
-rw-r--r--  arch/s390/kernel/ptrace.c  10
-rw-r--r--  arch/s390/kernel/s390_ksyms.c  5
-rw-r--r--  arch/s390/kernel/setup.c  185
-rw-r--r--  arch/s390/kernel/smp.c  201
-rw-r--r--  arch/s390/kernel/time.c  278
-rw-r--r--  arch/s390/kernel/topology.c  41
-rw-r--r--  arch/s390/kernel/vdso.c  234
-rw-r--r--  arch/s390/kernel/vdso32/Makefile  55
-rw-r--r--  arch/s390/kernel/vdso32/clock_getres.S  39
-rw-r--r--  arch/s390/kernel/vdso32/clock_gettime.S  128
-rw-r--r--  arch/s390/kernel/vdso32/gettimeofday.S  82
-rw-r--r--  arch/s390/kernel/vdso32/note.S  12
-rw-r--r--  arch/s390/kernel/vdso32/vdso32.lds.S  138
-rw-r--r--  arch/s390/kernel/vdso32/vdso32_wrapper.S  13
-rw-r--r--  arch/s390/kernel/vdso64/Makefile  55
-rw-r--r--  arch/s390/kernel/vdso64/clock_getres.S  39
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S  89
-rw-r--r--  arch/s390/kernel/vdso64/gettimeofday.S  56
-rw-r--r--  arch/s390/kernel/vdso64/note.S  12
-rw-r--r--  arch/s390/kernel/vdso64/vdso64.lds.S  138
-rw-r--r--  arch/s390/kernel/vdso64/vdso64_wrapper.S  13
-rw-r--r--  arch/s390/kernel/vtime.c  11
-rw-r--r--  arch/s390/mm/extmem.c  106
-rw-r--r--  arch/sh/include/asm/elf.h  2
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall.c  3
-rw-r--r--  arch/x86/include/asm/elf.h  2
-rw-r--r--  arch/x86/vdso/vdso32-setup.c  2
-rw-r--r--  arch/x86/vdso/vma.c  2
-rw-r--r--  drivers/char/Kconfig  10
-rw-r--r--  drivers/char/Makefile  1
-rw-r--r--  drivers/char/hvc_iucv.c  850
-rw-r--r--  drivers/s390/block/dasd.c  4
-rw-r--r--  drivers/s390/block/dasd_devmap.c  19
-rw-r--r--  drivers/s390/block/dasd_eckd.c  2
-rw-r--r--  drivers/s390/block/dasd_proc.c  28
-rw-r--r--  drivers/s390/block/dcssblk.c  77
-rw-r--r--  drivers/s390/block/xpram.c  41
-rw-r--r--  drivers/s390/char/monreader.c  41
-rw-r--r--  drivers/s390/char/monwriter.c  5
-rw-r--r--  drivers/s390/char/sclp_cmd.c  29
-rw-r--r--  drivers/s390/char/sclp_config.c  10
-rw-r--r--  drivers/s390/char/sclp_cpi_sys.c  12
-rw-r--r--  drivers/s390/char/sclp_sdias.c  18
-rw-r--r--  drivers/s390/char/sclp_vt220.c  33
-rw-r--r--  drivers/s390/char/vmcp.c  11
-rw-r--r--  drivers/s390/char/vmlogrdr.c  26
-rw-r--r--  drivers/s390/char/vmur.c  15
-rw-r--r--  drivers/s390/char/zcore.c  14
-rw-r--r--  drivers/s390/cio/blacklist.c  14
-rw-r--r--  drivers/s390/cio/ccwgroup.c  26
-rw-r--r--  drivers/s390/cio/chsc.c  8
-rw-r--r--  drivers/s390/cio/chsc_sch.c  2
-rw-r--r--  drivers/s390/cio/cio.c  245
-rw-r--r--  drivers/s390/cio/cio.h  18
-rw-r--r--  drivers/s390/cio/cmf.c  63
-rw-r--r--  drivers/s390/cio/css.c  12
-rw-r--r--  drivers/s390/cio/device.c  237
-rw-r--r--  drivers/s390/cio/device.h  1
-rw-r--r--  drivers/s390/cio/device_fsm.c  46
-rw-r--r--  drivers/s390/cio/device_pgid.c  2
-rw-r--r--  drivers/s390/cio/device_status.c  4
-rw-r--r--  drivers/s390/cio/qdio.h  33
-rw-r--r--  drivers/s390/cio/qdio_debug.c  104
-rw-r--r--  drivers/s390/cio/qdio_debug.h  112
-rw-r--r--  drivers/s390/cio/qdio_main.c  648
-rw-r--r--  drivers/s390/cio/qdio_perf.c  8
-rw-r--r--  drivers/s390/cio/qdio_perf.h  5
-rw-r--r--  drivers/s390/cio/qdio_setup.c  145
-rw-r--r--  drivers/s390/cio/qdio_thinint.c  29
-rw-r--r--  drivers/s390/crypto/ap_bus.c  212
-rw-r--r--  drivers/s390/crypto/ap_bus.h  6
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.c  10
-rw-r--r--  drivers/s390/crypto/zcrypt_pcica.c  10
-rw-r--r--  drivers/s390/crypto/zcrypt_pcicc.c  10
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c  10
-rw-r--r--  drivers/s390/net/ctcm_fsms.c  46
-rw-r--r--  drivers/s390/net/ctcm_main.c  72
-rw-r--r--  drivers/s390/net/ctcm_main.h  6
-rw-r--r--  drivers/s390/net/ctcm_mpc.c  15
-rw-r--r--  drivers/s390/net/ctcm_sysfs.c  3
-rw-r--r--  drivers/s390/net/lcs.c  92
-rw-r--r--  drivers/s390/net/netiucv.c  64
-rw-r--r--  drivers/s390/net/qeth_core.h  9
-rw-r--r--  drivers/s390/net/qeth_core_main.c  176
-rw-r--r--  drivers/s390/net/qeth_l2_main.c  37
-rw-r--r--  drivers/s390/net/qeth_l3_main.c  211
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c  28
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c  3
-rw-r--r--  drivers/s390/scsi/zfcp_cfdc.c  3
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c  3
-rw-r--r--  drivers/s390/scsi/zfcp_def.h  2
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c  10
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c  3
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c  3
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c  3
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c  3
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c  3
-rw-r--r--  drivers/s390/sysinfo.c  127
-rw-r--r--  fs/binfmt_elf.c  2
-rw-r--r--  include/asm-generic/memory_model.h  2
-rw-r--r--  include/net/iucv/iucv.h  45
-rw-r--r--  net/iucv/af_iucv.c  19
-rw-r--r--  net/iucv/iucv.c  161
143 files changed, 4876 insertions, 2300 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 68e7694c0ac7..a2d8805c03d5 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -823,6 +823,9 @@ and is between 256 and 4096 characters. It is defined in the file
hlt [BUGS=ARM,SH]
+ hvc_iucv= [S390] Number of z/VM IUCV Hypervisor console (HVC)
+ back-ends. Valid parameters: 0..8
+
i8042.debug [HW] Toggle i8042 debug mode
i8042.direct [HW] Put keyboard port into non-translated mode
i8042.dumbkbd [HW] Pretend that controller can only read data from
@@ -2292,6 +2295,14 @@ and is between 256 and 4096 characters. It is defined in the file
See comment before function dc390_setup() in
drivers/scsi/tmscsim.c.
+ topology= [S390]
+ Format: {off | on}
+ Specify whether the kernel should make use of the CPU
+ topology information if the hardware supports it.
+ The scheduler will make use of this information and,
+ for example, base its process migration decisions on it.
+ Default is off.
+
tp720= [HW,PS2]
trix= [HW,OSS] MediaTrix AudioTrix Pro
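As a rough illustration of the two s390 parameters documented in the hunks above (the values here are arbitrary and not part of the patch), a kernel command line could combine them as:

    hvc_iucv=4 topology=on

which would request four z/VM IUCV HVC console back-ends and let the scheduler use the CPU topology information.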
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index d812929390e4..cd46f023ec6d 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -267,7 +267,7 @@ extern int ucache_bsize;
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
- int executable_stack);
+ int uses_interp);
#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b);
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 65639a43e644..f7ec7d0888fe 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -184,8 +184,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma)
* This is called from binfmt_elf, we create the special vma for the
* vDSO and insert it into the mm struct tree
*/
-int arch_setup_additional_pages(struct linux_binprm *bprm,
- int executable_stack)
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
struct page **vdso_pagelist;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 8116a3328a19..8152fefc97b9 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -43,6 +43,9 @@ config GENERIC_HWEIGHT
config GENERIC_TIME
def_bool y
+config GENERIC_TIME_VSYSCALL
+ def_bool y
+
config GENERIC_CLOCKEVENTS
def_bool y
@@ -66,10 +69,15 @@ config PGSTE
bool
default y if KVM
+config VIRT_CPU_ACCOUNTING
+ def_bool y
+
mainmenu "Linux Kernel Configuration"
config S390
def_bool y
+ select USE_GENERIC_SMP_HELPERS if SMP
+ select HAVE_FUNCTION_TRACER
select HAVE_OPROFILE
select HAVE_KPROBES
select HAVE_KRETPROBES
@@ -225,6 +233,14 @@ config MARCH_Z9_109
Class (z9 BC). The kernel will be slightly faster but will not
work on older machines such as the z990, z890, z900, and z800.
+config MARCH_Z10
+ bool "IBM System z10"
+ help
+ Select this to enable optimizations for IBM System z10. The
+ kernel will be slightly faster but will not work on older
+ machines such as the z990, z890, z900, z800, z9-109, z9-ec
+ and z9-bc.
+
endchoice
config PACK_STACK
@@ -343,16 +359,6 @@ config QDIO
If unsure, say Y.
-config QDIO_DEBUG
- bool "Extended debugging information"
- depends on QDIO
- help
- Say Y here to get extended debugging output in
- /sys/kernel/debug/s390dbf/qdio...
- Warning: this option reduces the performance of the QDIO module.
-
- If unsure, say N.
-
config CHSC_SCH
tristate "Support for CHSC subchannels"
help
@@ -466,22 +472,9 @@ config PAGE_STATES
hypervisor. The ESSA instruction is used to do the states
changes between a page that has content and the unused state.
-config VIRT_TIMER
- bool "Virtual CPU timer support"
- help
- This provides a kernel interface for virtual CPU timers.
- Default is disabled.
-
-config VIRT_CPU_ACCOUNTING
- bool "Base user process accounting on virtual cpu timer"
- depends on VIRT_TIMER
- help
- Select this option to use CPU timer deltas to do user
- process accounting.
-
config APPLDATA_BASE
bool "Linux - VM Monitor Stream, base infrastructure"
- depends on PROC_FS && VIRT_TIMER=y
+ depends on PROC_FS
help
This provides a kernel interface for creating and updating z/VM APPLDATA
monitor records. The monitor records are updated at certain time
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 792a4e7743ce..578c61f15a4b 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -34,6 +34,7 @@ cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5)
cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900)
cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109)
+cflags-$(CONFIG_MARCH_Z10) += $(call cc-option,-march=z10)
#KBUILD_IMAGE is necessary for make rpm
KBUILD_IMAGE :=arch/s390/boot/image
diff --git a/arch/s390/appldata/appldata.h b/arch/s390/appldata/appldata.h
index 17a2636fec0a..f0b23fc759ba 100644
--- a/arch/s390/appldata/appldata.h
+++ b/arch/s390/appldata/appldata.h
@@ -26,10 +26,6 @@
#define CTL_APPLDATA_NET_SUM 2125
#define CTL_APPLDATA_PROC 2126
-#define P_INFO(x...) printk(KERN_INFO MY_PRINT_NAME " info: " x)
-#define P_ERROR(x...) printk(KERN_ERR MY_PRINT_NAME " error: " x)
-#define P_WARNING(x...) printk(KERN_WARNING MY_PRINT_NAME " status: " x)
-
struct appldata_ops {
struct list_head list;
struct ctl_table_header *sysctl_header;
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index a06a47cdd5e0..27b70d8a359c 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -10,6 +10,9 @@
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
+#define KMSG_COMPONENT "appldata"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -32,7 +35,6 @@
#include "appldata.h"
-#define MY_PRINT_NAME "appldata" /* for debug messages, etc. */
#define APPLDATA_CPU_INTERVAL 10000 /* default (CPU) time for
sampling interval in
milliseconds */
@@ -390,8 +392,8 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
(unsigned long) ops->data, ops->size,
ops->mod_lvl);
if (rc != 0) {
- P_ERROR("START DIAG 0xDC for %s failed, "
- "return code: %d\n", ops->name, rc);
+ pr_err("Starting the data collection for %s "
+ "failed with rc=%d\n", ops->name, rc);
module_put(ops->owner);
} else
ops->active = 1;
@@ -401,8 +403,8 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
(unsigned long) ops->data, ops->size,
ops->mod_lvl);
if (rc != 0)
- P_ERROR("STOP DIAG 0xDC for %s failed, "
- "return code: %d\n", ops->name, rc);
+ pr_err("Stopping the data collection for %s "
+ "failed with rc=%d\n", ops->name, rc);
module_put(ops->owner);
}
spin_unlock(&appldata_ops_lock);
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index eb44f9f8ab91..55c80ffd42b9 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -9,6 +9,9 @@
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
+#define KMSG_COMPONENT "appldata"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -22,7 +25,6 @@
#include "appldata.h"
-#define MY_PRINT_NAME "appldata_os" /* for debug messages, etc. */
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
@@ -143,21 +145,16 @@ static void appldata_get_os_data(void *data)
(unsigned long) ops.data, new_size,
ops.mod_lvl);
if (rc != 0)
- P_ERROR("os: START NEW DIAG 0xDC failed, "
- "return code: %d, new size = %i\n", rc,
- new_size);
+ pr_err("Starting a new OS data collection "
+ "failed with rc=%d\n", rc);
rc = appldata_diag(APPLDATA_RECORD_OS_ID,
APPLDATA_STOP_REC,
(unsigned long) ops.data, ops.size,
ops.mod_lvl);
if (rc != 0)
- P_ERROR("os: STOP OLD DIAG 0xDC failed, "
- "return code: %d, old size = %i\n", rc,
- ops.size);
- else
- P_INFO("os: old record size = %i stopped\n",
- ops.size);
+ pr_err("Stopping a faulty OS data "
+ "collection failed with rc=%d\n", rc);
}
ops.size = new_size;
}
@@ -178,8 +175,8 @@ static int __init appldata_os_init(void)
max_size = sizeof(struct appldata_os_data) +
(NR_CPUS * sizeof(struct appldata_os_per_cpu));
if (max_size > APPLDATA_MAX_REC_SIZE) {
- P_ERROR("Max. size of OS record = %i, bigger than maximum "
- "record size (%i)\n", max_size, APPLDATA_MAX_REC_SIZE);
+ pr_err("Maximum OS record size %i exceeds the maximum "
+ "record size %i\n", max_size, APPLDATA_MAX_REC_SIZE);
rc = -ENOMEM;
goto out;
}
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index e33f32b54c08..c42cd898f68b 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -17,6 +17,9 @@
*
*/
+#define KMSG_COMPONENT "aes_s390"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/err.h>
@@ -169,7 +172,8 @@ static int fallback_init_cip(struct crypto_tfm *tfm)
CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(sctx->fallback.cip)) {
- printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+ pr_err("Allocating AES fallback algorithm %s failed\n",
+ name);
return PTR_ERR(sctx->fallback.blk);
}
@@ -349,7 +353,8 @@ static int fallback_init_blk(struct crypto_tfm *tfm)
CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(sctx->fallback.blk)) {
- printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+ pr_err("Allocating AES fallback algorithm %s failed\n",
+ name);
return PTR_ERR(sctx->fallback.blk);
}
@@ -515,9 +520,8 @@ static int __init aes_s390_init(void)
/* z9 109 and z9 BC/EC only support 128 bit key length */
if (keylen_flag == AES_KEYLEN_128)
- printk(KERN_INFO
- "aes_s390: hardware acceleration only available for "
- "128 bit keys\n");
+ pr_info("AES hardware acceleration is only available for"
+ " 128-bit keys\n");
ret = crypto_register_alg(&aes_alg);
if (ret)
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index b9a1ce1f28e4..b1e892a43816 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -3,10 +3,13 @@
* Hypervisor filesystem for Linux on s390. Diag 204 and 224
* implementation.
*
- * Copyright (C) IBM Corp. 2006
+ * Copyright IBM Corp. 2006, 2008
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
*/
+#define KMSG_COMPONENT "hypfs"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/string.h>
@@ -527,13 +530,14 @@ __init int hypfs_diag_init(void)
int rc;
if (diag204_probe()) {
- printk(KERN_ERR "hypfs: diag 204 not working.");
+ pr_err("The hardware system does not support hypfs\n");
return -ENODATA;
}
rc = diag224_get_name_table();
if (rc) {
diag204_free_buffer();
- printk(KERN_ERR "hypfs: could not get name table.\n");
+ pr_err("The hardware system does not provide all "
+ "functions required by hypfs\n");
}
return rc;
}
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 8aadcd7a7cf8..9d4f8e6c0800 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -2,10 +2,13 @@
* arch/s390/hypfs/inode.c
* Hypervisor filesystem for Linux on s390.
*
- * Copyright (C) IBM Corp. 2006
+ * Copyright IBM Corp. 2006, 2008
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
*/
+#define KMSG_COMPONENT "hypfs"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
@@ -200,7 +203,7 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
else
rc = hypfs_diag_create_files(sb, sb->s_root);
if (rc) {
- printk(KERN_ERR "hypfs: Update failed\n");
+ pr_err("Updating the hypfs tree failed\n");
hypfs_delete_tree(sb->s_root);
goto out;
}
@@ -252,8 +255,7 @@ static int hypfs_parse_options(char *options, struct super_block *sb)
break;
case opt_err:
default:
- printk(KERN_ERR "hypfs: Unrecognized mount option "
- "\"%s\" or missing value\n", str);
+ pr_err("%s is not a valid mount option\n", str);
return -EINVAL;
}
}
@@ -317,7 +319,7 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
}
hypfs_update_update(sb);
sb->s_root = root_dentry;
- printk(KERN_INFO "hypfs: Hypervisor filesystem mounted\n");
+ pr_info("Hypervisor filesystem mounted\n");
return 0;
err_tree:
@@ -513,7 +515,7 @@ fail_sysfs:
if (!MACHINE_IS_VM)
hypfs_diag_exit();
fail_diag:
- printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc);
+ pr_err("Initialization of hypfs failed with rc=%i\n", rc);
return rc;
}
diff --git a/arch/s390/include/asm/auxvec.h b/arch/s390/include/asm/auxvec.h
index 0d340720fd99..a1f153e89133 100644
--- a/arch/s390/include/asm/auxvec.h
+++ b/arch/s390/include/asm/auxvec.h
@@ -1,4 +1,6 @@
#ifndef __ASMS390_AUXVEC_H
#define __ASMS390_AUXVEC_H
+#define AT_SYSINFO_EHDR 33
+
#endif
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index 384e3621e341..7efd0abe8887 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -47,7 +47,10 @@
#endif /* CONFIG_DEBUG_BUGVERBOSE */
-#define BUG() __EMIT_BUG(0)
+#define BUG() do { \
+ __EMIT_BUG(0); \
+ for (;;); \
+} while (0)
#define WARN_ON(x) ({ \
int __ret_warn_on = !!(x); \
diff --git a/arch/s390/include/asm/byteorder.h b/arch/s390/include/asm/byteorder.h
index 1fe2492baa8d..8bcf277c8468 100644
--- a/arch/s390/include/asm/byteorder.h
+++ b/arch/s390/include/asm/byteorder.h
@@ -11,32 +11,39 @@
#include <asm/types.h>
-#ifdef __GNUC__
+#define __BIG_ENDIAN
+
+#ifndef __s390x__
+# define __SWAB_64_THRU_32__
+#endif
#ifdef __s390x__
-static inline __u64 ___arch__swab64p(const __u64 *x)
+static inline __u64 __arch_swab64p(const __u64 *x)
{
__u64 result;
asm volatile("lrvg %0,%1" : "=d" (result) : "m" (*x));
return result;
}
+#define __arch_swab64p __arch_swab64p
-static inline __u64 ___arch__swab64(__u64 x)
+static inline __u64 __arch_swab64(__u64 x)
{
__u64 result;
asm volatile("lrvgr %0,%1" : "=d" (result) : "d" (x));
return result;
}
+#define __arch_swab64 __arch_swab64
-static inline void ___arch__swab64s(__u64 *x)
+static inline void __arch_swab64s(__u64 *x)
{
- *x = ___arch__swab64p(x);
+ *x = __arch_swab64p(x);
}
+#define __arch_swab64s __arch_swab64s
#endif /* __s390x__ */
-static inline __u32 ___arch__swab32p(const __u32 *x)
+static inline __u32 __arch_swab32p(const __u32 *x)
{
__u32 result;
@@ -53,25 +60,20 @@ static inline __u32 ___arch__swab32p(const __u32 *x)
#endif /* __s390x__ */
return result;
}
+#define __arch_swab32p __arch_swab32p
-static inline __u32 ___arch__swab32(__u32 x)
+#ifdef __s390x__
+static inline __u32 __arch_swab32(__u32 x)
{
-#ifndef __s390x__
- return ___arch__swab32p(&x);
-#else /* __s390x__ */
__u32 result;
asm volatile("lrvr %0,%1" : "=d" (result) : "d" (x));
return result;
-#endif /* __s390x__ */
-}
-
-static __inline__ void ___arch__swab32s(__u32 *x)
-{
- *x = ___arch__swab32p(x);
}
+#define __arch_swab32 __arch_swab32
+#endif /* __s390x__ */
-static __inline__ __u16 ___arch__swab16p(const __u16 *x)
+static inline __u16 __arch_swab16p(const __u16 *x)
{
__u16 result;
@@ -86,40 +88,8 @@ static __inline__ __u16 ___arch__swab16p(const __u16 *x)
#endif /* __s390x__ */
return result;
}
+#define __arch_swab16p __arch_swab16p
-static __inline__ __u16 ___arch__swab16(__u16 x)
-{
- return ___arch__swab16p(&x);
-}
-
-static __inline__ void ___arch__swab16s(__u16 *x)
-{
- *x = ___arch__swab16p(x);
-}
-
-#ifdef __s390x__
-#define __arch__swab64(x) ___arch__swab64(x)
-#define __arch__swab64p(x) ___arch__swab64p(x)
-#define __arch__swab64s(x) ___arch__swab64s(x)
-#endif /* __s390x__ */
-#define __arch__swab32(x) ___arch__swab32(x)
-#define __arch__swab16(x) ___arch__swab16(x)
-#define __arch__swab32p(x) ___arch__swab32p(x)
-#define __arch__swab16p(x) ___arch__swab16p(x)
-#define __arch__swab32s(x) ___arch__swab32s(x)
-#define __arch__swab16s(x) ___arch__swab16s(x)
-
-#ifndef __s390x__
-#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
-# define __BYTEORDER_HAS_U64__
-# define __SWAB_64_THRU_32__
-#endif
-#else /* __s390x__ */
-#define __BYTEORDER_HAS_U64__
-#endif /* __s390x__ */
-
-#endif /* __GNUC__ */
-
-#include <linux/byteorder/big_endian.h>
+#include <linux/byteorder.h>
#endif /* _S390_BYTEORDER_H */
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 261785ab5b22..d480f39d65e6 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -120,6 +120,10 @@ typedef s390_compat_regs compat_elf_gregset_t;
#include <asm/system.h> /* for save_access_regs */
#include <asm/mmu_context.h>
+#include <asm/vdso.h>
+
+extern unsigned int vdso_enabled;
+
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
@@ -191,4 +195,16 @@ do { \
current->mm->context.noexec == 0; \
})
+#define ARCH_DLINFO \
+do { \
+ if (vdso_enabled) \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (unsigned long)current->mm->context.vdso_base); \
+} while (0)
+
+struct linux_binprm;
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+int arch_setup_additional_pages(struct linux_binprm *, int);
+
#endif
diff --git a/arch/s390/include/asm/fcx.h b/arch/s390/include/asm/fcx.h
index 8be1f3a58042..ef6170995076 100644
--- a/arch/s390/include/asm/fcx.h
+++ b/arch/s390/include/asm/fcx.h
@@ -248,8 +248,8 @@ struct dcw {
#define TCCB_MAX_SIZE (sizeof(struct tccb_tcah) + \
TCCB_MAX_DCW * sizeof(struct dcw) + \
sizeof(struct tccb_tcat))
-#define TCCB_SAC_DEFAULT 0xf901
-#define TCCB_SAC_INTRG 0xf902
+#define TCCB_SAC_DEFAULT 0x1ffe
+#define TCCB_SAC_INTRG 0x1fff
/**
* struct tccb_tcah - Transport-Command-Area Header (TCAH)
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
new file mode 100644
index 000000000000..5a5bc75e19d4
--- /dev/null
+++ b/arch/s390/include/asm/ftrace.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_S390_FTRACE_H
+#define _ASM_S390_FTRACE_H
+
+#ifndef __ASSEMBLY__
+extern void _mcount(void);
+#endif
+
+#endif /* _ASM_S390_FTRACE_H */
diff --git a/arch/s390/include/asm/isc.h b/arch/s390/include/asm/isc.h
index 34bb8916db4f..1420a1115948 100644
--- a/arch/s390/include/asm/isc.h
+++ b/arch/s390/include/asm/isc.h
@@ -17,6 +17,7 @@
#define CHSC_SCH_ISC 7 /* CHSC subchannels */
/* Adapter interrupts. */
#define QDIO_AIRQ_ISC IO_SCH_ISC /* I/O subchannel in qdio mode */
+#define AP_ISC 6 /* adjunct processor (crypto) devices */
/* Functions for registration of I/O interruption subclasses */
void isc_register(unsigned int isc);
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index d2b4ff831477..3b59216e6284 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -6,6 +6,7 @@ typedef struct {
struct list_head pgtable_list;
unsigned long asce_bits;
unsigned long asce_limit;
+ unsigned long vdso_base;
int noexec;
int has_pgste; /* The mmu context has extended page tables */
int alloc_pgste; /* cloned contexts will have extended page tables */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 991ba939408c..32e8f6aa4384 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -152,4 +152,6 @@ void arch_alloc_page(struct page *page, int order);
#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
+#define __HAVE_ARCH_GATE_AREA 1
+
#endif /* _S390_PAGE_H */
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index f5b2bf3d7c1d..b2658b9220fe 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -28,6 +28,8 @@ void disable_noexec(struct mm_struct *, struct task_struct *);
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
+ typedef struct { char _[n]; } addrtype;
+
*s = val;
n = (n / 256) - 1;
asm volatile(
@@ -39,7 +41,8 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
"0: mvc 256(256,%0),0(%0)\n"
" la %0,256(%0)\n"
" brct %1,0b\n"
- : "+a" (s), "+d" (n));
+ : "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
+ : "m" (*(addrtype *) s));
}
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 4af80af2a88f..066b99502e09 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -13,6 +13,7 @@
#ifndef __ASM_S390_PROCESSOR_H
#define __ASM_S390_PROCESSOR_H
+#include <linux/linkage.h>
#include <asm/ptrace.h>
#ifdef __KERNEL__
@@ -258,7 +259,7 @@ static inline void enabled_wait(void)
* Function to drop a processor into disabled wait state
*/
-static inline void disabled_wait(unsigned long code)
+static inline void ATTRIB_NORET disabled_wait(unsigned long code)
{
unsigned long ctl_buf;
psw_t dw_psw;
@@ -322,6 +323,7 @@ static inline void disabled_wait(unsigned long code)
: "=m" (ctl_buf)
: "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0");
#endif /* __s390x__ */
+ while (1);
}
/*
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 4734c3f05354..27fc1746de15 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -373,16 +373,16 @@ struct qdio_initialize {
#define QDIO_FLAG_SYNC_OUTPUT 0x02
#define QDIO_FLAG_PCI_OUT 0x10
-extern int qdio_initialize(struct qdio_initialize *init_data);
-extern int qdio_allocate(struct qdio_initialize *init_data);
-extern int qdio_establish(struct qdio_initialize *init_data);
+extern int qdio_initialize(struct qdio_initialize *);
+extern int qdio_allocate(struct qdio_initialize *);
+extern int qdio_establish(struct qdio_initialize *);
extern int qdio_activate(struct ccw_device *);
-extern int do_QDIO(struct ccw_device*, unsigned int flags,
- int q_nr, int qidx, int count);
-extern int qdio_cleanup(struct ccw_device*, int how);
-extern int qdio_shutdown(struct ccw_device*, int how);
+extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
+ int q_nr, int bufnr, int count);
+extern int qdio_cleanup(struct ccw_device*, int);
+extern int qdio_shutdown(struct ccw_device*, int);
extern int qdio_free(struct ccw_device *);
-extern struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev);
+extern int qdio_get_ssqd_desc(struct ccw_device *dev, struct qdio_ssqd_desc*);
#endif /* __QDIO_H__ */
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index e16d56f8dfe1..ec403d4304f8 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -61,6 +61,7 @@ typedef enum
{
ec_schedule=0,
ec_call_function,
+ ec_call_function_single,
ec_bit_last
} ec_bit_sig;
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index ae89cf2478fc..024b91e06239 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -91,8 +91,9 @@ extern int __cpu_up (unsigned int cpu);
extern struct mutex smp_cpu_state_mutex;
extern int smp_cpu_polarization[];
-extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
- void *info, int wait);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
#endif
#ifndef CONFIG_SMP
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 79d01343f8b0..ad93212d9e16 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -118,4 +118,15 @@ static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
return r0;
}
+/*
+ * Service level reporting interface.
+ */
+struct service_level {
+ struct list_head list;
+ void (*seq_print)(struct seq_file *, struct service_level *);
+};
+
+int register_service_level(struct service_level *);
+int unregister_service_level(struct service_level *);
+
#endif /* __ASM_S390_SYSINFO_H */
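The service level interface added above is a small registration API: a user embeds a struct service_level, fills in the seq_print callback, and registers it so that its line appears in the kernel's service level report. A minimal sketch of a hypothetical user, assuming only the declarations shown in this hunk (the driver name and version string are made up):

	/* Hypothetical example -- not part of this merge. */
	#include <linux/seq_file.h>
	#include <asm/sysinfo.h>

	static void my_print_service_level(struct seq_file *m,
					   struct service_level *slr)
	{
		/* Emit one line of the service level report. */
		seq_printf(m, "my_driver: firmware level 1.2.3\n");
	}

	static struct service_level my_service_level = {
		.seq_print = my_print_service_level,
	};

	static int __init my_init(void)
	{
		return register_service_level(&my_service_level);
	}

	static void __exit my_exit(void)
	{
		unregister_service_level(&my_service_level);
	}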
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 819e7d99ca0c..024ef42ed6d7 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -12,6 +12,7 @@
#define __ASM_SYSTEM_H
#include <linux/kernel.h>
+#include <linux/errno.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
@@ -98,13 +99,9 @@ static inline void restore_access_regs(unsigned int *acrs)
prev = __switch_to(prev,next); \
} while (0)
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_vtime(struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);
-#else
-#define account_vtime(x) do { /* empty */ } while (0)
-#endif
#ifdef CONFIG_PFAULT
extern void pfault_irq_init(void);
@@ -413,8 +410,6 @@ __set_psw_mask(unsigned long mask)
#define local_mcck_enable() __set_psw_mask(psw_kernel_bits)
#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
-int stfle(unsigned long long *list, int doublewords);
-
#ifdef CONFIG_SMP
extern void smp_ctl_set_bit(int cr, int bit);
@@ -438,6 +433,23 @@ static inline unsigned int stfl(void)
return S390_lowcore.stfl_fac_list;
}
+static inline int __stfle(unsigned long long *list, int doublewords)
+{
+ typedef struct { unsigned long long _[doublewords]; } addrtype;
+ register unsigned long __nr asm("0") = doublewords - 1;
+
+ asm volatile(".insn s,0xb2b00000,%0" /* stfle */
+ : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc");
+ return __nr + 1;
+}
+
+static inline int stfle(unsigned long long *list, int doublewords)
+{
+ if (!(stfl() & (1UL << 24)))
+ return -EOPNOTSUPP;
+ return __stfle(list, doublewords);
+}
+
static inline unsigned short stap(void)
{
unsigned short cpu_address;
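The stfle() wrapper added above first tests bit 24 of the basic STFL facility list (the store-facility-list-extended facility) and returns -EOPNOTSUPP when STFLE is unavailable; otherwise __stfle() executes the instruction and returns how many doublewords of facility bits the machine reports. A hedged sketch of a caller that checks a single facility bit (helper name and buffer size chosen for illustration; facility bits are numbered from the leftmost bit of the first doubleword):

	/* Hypothetical caller -- not part of this merge. */
	static int test_facility_bit(int nr)
	{
		unsigned long long list[4] = { 0 };
		int dw = stfle(list, 4);

		if (dw < 0 || nr >= 4 * 64)
			return 0;	/* STFLE missing or bit outside buffer */
		return (list[nr / 64] >> (63 - (nr % 64))) & 1;
	}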
diff --git a/arch/s390/include/asm/timer.h b/arch/s390/include/asm/timer.h
index d98d79e35cd6..61705d60f995 100644
--- a/arch/s390/include/asm/timer.h
+++ b/arch/s390/include/asm/timer.h
@@ -48,18 +48,9 @@ extern int del_virt_timer(struct vtimer_list *timer);
extern void init_cpu_vtimer(void);
extern void vtime_init(void);
-#ifdef CONFIG_VIRT_TIMER
-
extern void vtime_start_cpu_timer(void);
extern void vtime_stop_cpu_timer(void);
-#else
-
-static inline void vtime_start_cpu_timer(void) { }
-static inline void vtime_stop_cpu_timer(void) { }
-
-#endif /* CONFIG_VIRT_TIMER */
-
#endif /* __KERNEL__ */
#endif /* _ASM_S390_TIMER_H */
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
new file mode 100644
index 000000000000..a44f4fe16a35
--- /dev/null
+++ b/arch/s390/include/asm/vdso.h
@@ -0,0 +1,39 @@
+#ifndef __S390_VDSO_H__
+#define __S390_VDSO_H__
+
+#ifdef __KERNEL__
+
+/* Default link addresses for the vDSOs */
+#define VDSO32_LBASE 0
+#define VDSO64_LBASE 0
+
+#define VDSO_VERSION_STRING LINUX_2.6.26
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Note about this structure:
+ *
+ * NEVER USE THIS IN USERSPACE CODE DIRECTLY. The layout of this
+ * structure is supposed to be known only to the function in the vdso
+ * itself and may change without notice.
+ */
+
+struct vdso_data {
+ __u64 tb_update_count; /* Timebase atomicity ctr 0x00 */
+ __u64 xtime_tod_stamp; /* TOD clock for xtime 0x08 */
+ __u64 xtime_clock_sec; /* Kernel time 0x10 */
+ __u64 xtime_clock_nsec; /* 0x18 */
+ __u64 wtom_clock_sec; /* Wall to monotonic clock 0x20 */
+ __u64 wtom_clock_nsec; /* 0x28 */
+ __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */
+ __u32 tz_dsttime; /* Type of dst correction 0x34 */
+};
+
+extern struct vdso_data *vdso_data;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* __S390_VDSO_H__ */
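The vdso_data block above is the kernel-to-vDSO channel: the kernel keeps it current through the GENERIC_TIME_VSYSCALL update path (selected in the Kconfig hunk earlier in this diff), and the vdso32/vdso64 assembly routines added later in the merge read it to answer gettimeofday, clock_gettime and clock_getres without a system call. User space is never meant to touch the structure directly; where the C library resolves these calls through the vDSO, an ordinary program benefits transparently, roughly as in this illustrative snippet:

	/* Illustrative user-space consumer -- not part of this merge. */
	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec ts;

		/* With a vDSO-backed clock_gettime this normally completes
		 * without entering the kernel. */
		if (clock_gettime(CLOCK_REALTIME, &ts) == 0)
			printf("%lld.%09ld\n", (long long) ts.tv_sec, ts.tv_nsec);
		return 0;
	}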
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 50f657e77344..3edc6c6f258b 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -2,6 +2,11 @@
# Makefile for the linux kernel.
#
+ifdef CONFIG_FUNCTION_TRACER
+# Do not trace early boot code
+CFLAGS_REMOVE_early.o = -pg
+endif
+
#
# Passing null pointers is ok for smp code, since we access the lowcore here.
#
@@ -12,9 +17,10 @@ CFLAGS_smp.o := -Wno-nonnull
#
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
-obj-y := bitmap.o traps.o time.o process.o base.o early.o \
- setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
- s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o
+obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \
+ processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
+ s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \
+ vdso.o vtime.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
@@ -30,12 +36,16 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
compat_wrapper.o compat_exec_domain.o \
$(compat-obj-y)
-obj-$(CONFIG_VIRT_TIMER) += vtime.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
# Kexec part
S390_KEXEC_OBJS := machine_kexec.o crash.o
S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS)
+# vdso
+obj-$(CONFIG_64BIT) += vdso64/
+obj-$(CONFIG_32BIT) += vdso32/
+obj-$(CONFIG_COMPAT) += vdso32/
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 3d144e6020c6..e641f60bac99 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -6,6 +6,7 @@
#include <linux/sched.h>
#include <linux/kbuild.h>
+#include <asm/vdso.h>
int main(void)
{
@@ -38,5 +39,19 @@ int main(void)
DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain));
DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs));
DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1));
+ BLANK();
+ /* timeval/timezone offsets for use by vdso */
+ DEFINE(__VDSO_UPD_COUNT, offsetof(struct vdso_data, tb_update_count));
+ DEFINE(__VDSO_XTIME_STAMP, offsetof(struct vdso_data, xtime_tod_stamp));
+ DEFINE(__VDSO_XTIME_SEC, offsetof(struct vdso_data, xtime_clock_sec));
+ DEFINE(__VDSO_XTIME_NSEC, offsetof(struct vdso_data, xtime_clock_nsec));
+ DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec));
+ DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
+ DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
+ /* constants used by the vdso */
+ DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
+ DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
+ DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
+
return 0;
}
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index d8c1131e0815..3e8b8816f309 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -7,6 +7,9 @@
* Christian Borntraeger (cborntra@de.ibm.com),
*/
+#define KMSG_COMPONENT "cpcmd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -104,8 +107,8 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
(((unsigned long)response + rlen) >> 31)) {
lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
if (!lowbuf) {
- printk(KERN_WARNING
- "cpcmd: could not allocate response buffer\n");
+ pr_warning("The cpcmd kernel function failed to "
+ "allocate a response buffer\n");
return -ENOMEM;
}
spin_lock_irqsave(&cpcmd_lock, flags);
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index d80fcd4a7fe1..ba03fc0a3a56 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -10,6 +10,9 @@
* Bugreports to: <Linux390@de.ibm.com>
*/
+#define KMSG_COMPONENT "s390dbf"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -388,7 +391,7 @@ debug_info_copy(debug_info_t* in, int mode)
debug_info_free(rc);
} while (1);
- if(!rc || (mode == NO_AREAS))
+ if (mode == NO_AREAS)
goto out;
for(i = 0; i < in->nr_areas; i++){
@@ -693,8 +696,8 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
/* Since debugfs currently does not support uid/gid other than root, */
/* we do not allow gid/uid != 0 until we get support for that. */
if ((uid != 0) || (gid != 0))
- printk(KERN_WARNING "debug: Warning - Currently only uid/gid "
- "= 0 are supported. Using root as owner now!");
+ pr_warning("Root becomes the owner of all s390dbf files "
+ "in sysfs\n");
if (!initialized)
BUG();
mutex_lock(&debug_mutex);
@@ -709,7 +712,7 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
debug_register_view(rc, &debug_pages_view);
out:
if (!rc){
- printk(KERN_ERR "debug: debug_register failed for %s\n",name);
+ pr_err("Registering debug feature %s failed\n", name);
}
mutex_unlock(&debug_mutex);
return rc;
@@ -763,8 +766,8 @@ debug_set_size(debug_info_t* id, int nr_areas, int pages_per_area)
if(pages_per_area > 0){
new_areas = debug_areas_alloc(pages_per_area, nr_areas);
if(!new_areas) {
- printk(KERN_WARNING "debug: could not allocate memory "\
- "for pagenumber: %i\n",pages_per_area);
+ pr_info("Allocating memory for %i pages failed\n",
+ pages_per_area);
rc = -ENOMEM;
goto out;
}
@@ -780,8 +783,7 @@ debug_set_size(debug_info_t* id, int nr_areas, int pages_per_area)
memset(id->active_entries,0,sizeof(int)*id->nr_areas);
memset(id->active_pages, 0, sizeof(int)*id->nr_areas);
spin_unlock_irqrestore(&id->lock,flags);
- printk(KERN_INFO "debug: %s: set new size (%i pages)\n"\
- ,id->name, pages_per_area);
+ pr_info("%s: set new size (%i pages)\n" ,id->name, pages_per_area);
out:
return rc;
}
@@ -800,10 +802,9 @@ debug_set_level(debug_info_t* id, int new_level)
spin_lock_irqsave(&id->lock,flags);
if(new_level == DEBUG_OFF_LEVEL){
id->level = DEBUG_OFF_LEVEL;
- printk(KERN_INFO "debug: %s: switched off\n",id->name);
+ pr_info("%s: switched off\n",id->name);
} else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) {
- printk(KERN_INFO
- "debug: %s: level %i is out of range (%i - %i)\n",
+ pr_info("%s: level %i is out of range (%i - %i)\n",
id->name, new_level, 0, DEBUG_MAX_LEVEL);
} else {
id->level = new_level;
@@ -1108,8 +1109,8 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry,
id , &debug_file_ops);
if (!pde){
- printk(KERN_WARNING "debug: debugfs_create_file() failed!"\
- " Cannot register view %s/%s\n", id->name,view->name);
+ pr_err("Registering view %s/%s failed due to out of "
+ "memory\n", id->name,view->name);
rc = -1;
goto out;
}
@@ -1119,10 +1120,8 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
break;
}
if (i == DEBUG_MAX_VIEWS) {
- printk(KERN_WARNING "debug: cannot register view %s/%s\n",
- id->name,view->name);
- printk(KERN_WARNING
- "debug: maximum number of views reached (%i)!\n", i);
+ pr_err("Registering view %s/%s would exceed the maximum "
+ "number of views %i\n", id->name, view->name, i);
debugfs_remove(pde);
rc = -1;
} else {
@@ -1303,7 +1302,8 @@ debug_input_level_fn(debug_info_t * id, struct debug_view *view,
new_level = debug_get_uint(str);
}
if(new_level < 0) {
- printk(KERN_INFO "debug: level `%s` is not valid\n", str);
+ pr_warning("%s is not a valid level for a debug "
+ "feature\n", str);
rc = -EINVAL;
} else {
debug_set_level(id, new_level);
@@ -1380,7 +1380,8 @@ debug_input_flush_fn(debug_info_t * id, struct debug_view *view,
goto out;
}
- printk(KERN_INFO "debug: area `%c` is not valid\n", input_buf[0]);
+ pr_info("Flushing debug data failed because %c is not a valid "
+ "area\n", input_buf[0]);
out:
*offset += user_len;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 198ea18a534d..55de521aef77 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -109,13 +109,6 @@ STACK_SIZE = 1 << STACK_SHIFT
* R15 - kernel stack pointer
*/
- .macro STORE_TIMER lc_offset
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
- stpt \lc_offset
-#endif
- .endm
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
.macro UPDATE_VTIME lc_from,lc_to,lc_sum
lm %r10,%r11,\lc_from
sl %r10,\lc_to
@@ -128,7 +121,6 @@ STACK_SIZE = 1 << STACK_SHIFT
al %r10,BASED(.Lc_1)
1: stm %r10,%r11,\lc_sum
.endm
-#endif
.macro SAVE_ALL_BASE savearea
stm %r12,%r15,\savearea
@@ -198,7 +190,7 @@ STACK_SIZE = 1 << STACK_SHIFT
ni \psworg+1,0xfd # clear wait state bit
.endif
lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
- STORE_TIMER __LC_EXIT_TIMER
+ stpt __LC_EXIT_TIMER
lpsw \psworg # back to caller
.endm
@@ -247,20 +239,18 @@ __critical_start:
.globl system_call
system_call:
- STORE_TIMER __LC_SYNC_ENTER_TIMER
+ stpt __LC_SYNC_ENTER_TIMER
sysc_saveall:
SAVE_ALL_BASE __LC_SAVE_AREA
SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
lh %r7,0x8a # get svc number from lowcore
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
sysc_vtime:
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
sysc_stime:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
sysc_update:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-#endif
sysc_do_svc:
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
ltr %r7,%r7 # test for svc 0
@@ -436,7 +426,7 @@ ret_from_fork:
basr %r14,%r1
TRACE_IRQS_ON
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
- b BASED(sysc_return)
+ b BASED(sysc_tracenogo)
#
# kernel_execve function needs to deal with pt_regs that is not
@@ -490,20 +480,18 @@ pgm_check_handler:
* we just ignore the PER event (FIXME: is there anything we have to do
* for LPSW?).
*/
- STORE_TIMER __LC_SYNC_ENTER_TIMER
+ stpt __LC_SYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
bnz BASED(pgm_per) # got per exception -> special case
SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
bz BASED(pgm_no_vtime)
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime:
-#endif
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF
l %r3,__LC_PGM_ILC # load program interruption code
@@ -536,14 +524,12 @@ pgm_per:
pgm_per_std:
SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
bz BASED(pgm_no_vtime2)
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2:
-#endif
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF
l %r1,__TI_task(%r9)
@@ -565,11 +551,9 @@ pgm_no_vtime2:
pgm_svcper:
SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-#endif
lh %r7,0x8a # get svc number from lowcore
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF
@@ -599,19 +583,17 @@ kernel_per:
.globl io_int_handler
io_int_handler:
- STORE_TIMER __LC_ASYNC_ENTER_TIMER
+ stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
SAVE_ALL_BASE __LC_SAVE_AREA+16
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
bz BASED(io_no_vtime)
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
io_no_vtime:
-#endif
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF
l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
@@ -741,19 +723,17 @@ io_notify_resume:
.globl ext_int_handler
ext_int_handler:
- STORE_TIMER __LC_ASYNC_ENTER_TIMER
+ stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
SAVE_ALL_BASE __LC_SAVE_AREA+16
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
bz BASED(ext_no_vtime)
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
ext_no_vtime:
-#endif
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF
la %r2,SP_PTREGS(%r15) # address of register-save area
@@ -776,7 +756,6 @@ mcck_int_handler:
la %r12,__LC_MCK_OLD_PSW
tm __LC_MCCK_CODE,0x80 # system damage?
bo BASED(mcck_int_main) # yes -> rest of mcck code invalid
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER
mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
@@ -793,9 +772,7 @@ mcck_int_handler:
la %r14,__LC_LAST_UPDATE_TIMER
0: spt 0(%r14)
mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14)
-1:
-#endif
- tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
+1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
bno BASED(mcck_int_main) # no -> skip cleanup critical
tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
bnz BASED(mcck_int_main) # from user -> load async stack
@@ -812,7 +789,6 @@ mcck_int_main:
be BASED(0f)
l %r15,__LC_PANIC_STACK # load panic stack
0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
bno BASED(mcck_no_vtime) # no -> skip cleanup critical
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -821,7 +797,6 @@ mcck_int_main:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
mcck_no_vtime:
-#endif
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
la %r2,SP_PTREGS(%r15) # load pt_regs
l %r1,BASED(.Ls390_mcck)
@@ -843,16 +818,13 @@ mcck_no_vtime:
mcck_return:
mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
bno BASED(0f)
lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
stpt __LC_EXIT_TIMER
lpsw __LC_RETURN_MCCK_PSW # back to caller
-0:
-#endif
- lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
+0: lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
lpsw __LC_RETURN_MCCK_PSW # back to caller
RESTORE_ALL __LC_RETURN_MCCK_PSW,0
@@ -976,13 +948,11 @@ cleanup_system_call:
b BASED(1f)
0: la %r12,__LC_SAVE_AREA+32
1:
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
bh BASED(0f)
mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
bhe BASED(cleanup_vtime)
-#endif
clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
bh BASED(0f)
mvc __LC_SAVE_AREA(16),0(%r12)
@@ -993,7 +963,6 @@ cleanup_system_call:
l %r12,__LC_SAVE_AREA+48 # argh
st %r15,12(%r12)
lh %r7,0x8a
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
cleanup_vtime:
clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
bhe BASED(cleanup_stime)
@@ -1004,18 +973,15 @@ cleanup_stime:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
cleanup_update:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-#endif
mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
la %r12,__LC_RETURN_PSW
br %r14
cleanup_system_call_insn:
.long sysc_saveall + 0x80000000
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
.long system_call + 0x80000000
.long sysc_vtime + 0x80000000
.long sysc_stime + 0x80000000
.long sysc_update + 0x80000000
-#endif
cleanup_sysc_return:
mvc __LC_RETURN_PSW(4),0(%r12)
@@ -1026,11 +992,9 @@ cleanup_sysc_return:
cleanup_sysc_leave:
clc 4(4,%r12),BASED(cleanup_sysc_leave_insn)
be BASED(2f)
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4)
be BASED(2f)
-#endif
mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
c %r12,BASED(.Lmck_old_psw)
bne BASED(0f)
@@ -1043,9 +1007,7 @@ cleanup_sysc_leave:
br %r14
cleanup_sysc_leave_insn:
.long sysc_done - 4 + 0x80000000
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
.long sysc_done - 8 + 0x80000000
-#endif
cleanup_io_return:
mvc __LC_RETURN_PSW(4),0(%r12)
@@ -1056,11 +1018,9 @@ cleanup_io_return:
cleanup_io_leave:
clc 4(4,%r12),BASED(cleanup_io_leave_insn)
be BASED(2f)
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
clc 4(4,%r12),BASED(cleanup_io_leave_insn+4)
be BASED(2f)
-#endif
mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
c %r12,BASED(.Lmck_old_psw)
bne BASED(0f)
@@ -1073,9 +1033,7 @@ cleanup_io_leave:
br %r14
cleanup_io_leave_insn:
.long io_done - 4 + 0x80000000
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
.long io_done - 8 + 0x80000000
-#endif
/*
* Integer constants
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 89c121ae6339..16bb4fd1a403 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -96,20 +96,12 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
#define LOCKDEP_SYS_EXIT
#endif
- .macro STORE_TIMER lc_offset
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
- stpt \lc_offset
-#endif
- .endm
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
.macro UPDATE_VTIME lc_from,lc_to,lc_sum
lg %r10,\lc_from
slg %r10,\lc_to
alg %r10,\lc_sum
stg %r10,\lc_sum
.endm
-#endif
/*
* Register usage in interrupt handlers:
@@ -186,7 +178,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
ni \psworg+1,0xfd # clear wait state bit
.endif
lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
- STORE_TIMER __LC_EXIT_TIMER
+ stpt __LC_EXIT_TIMER
lpswe \psworg # back to caller
.endm
@@ -233,20 +225,18 @@ __critical_start:
.globl system_call
system_call:
- STORE_TIMER __LC_SYNC_ENTER_TIMER
+ stpt __LC_SYNC_ENTER_TIMER
sysc_saveall:
SAVE_ALL_BASE __LC_SAVE_AREA
SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
sysc_vtime:
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
sysc_stime:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
sysc_update:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-#endif
sysc_do_svc:
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
ltgr %r7,%r7 # test for svc 0
@@ -417,7 +407,7 @@ ret_from_fork:
0: brasl %r14,schedule_tail
TRACE_IRQS_ON
stosm 24(%r15),0x03 # reenable interrupts
- j sysc_return
+ j sysc_tracenogo
#
# kernel_execve function needs to deal with pt_regs that is not
@@ -469,20 +459,18 @@ pgm_check_handler:
* we just ignore the PER event (FIXME: is there anything we have to do
* for LPSW?).
*/
- STORE_TIMER __LC_SYNC_ENTER_TIMER
+ stpt __LC_SYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
jnz pgm_per # got per exception -> special case
SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz pgm_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime:
-#endif
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
mvc SP_ARGS(8,%r15),__LC_LAST_BREAK
TRACE_IRQS_OFF
@@ -516,14 +504,12 @@ pgm_per:
pgm_per_std:
SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz pgm_no_vtime2
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2:
-#endif
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF
lg %r1,__TI_task(%r9)
@@ -545,11 +531,9 @@ pgm_no_vtime2:
pgm_svcper:
SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-#endif
llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
lg %r1,__TI_task(%r9)
@@ -575,19 +559,17 @@ kernel_per:
*/
.globl io_int_handler
io_int_handler:
- STORE_TIMER __LC_ASYNC_ENTER_TIMER
+ stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz io_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
io_no_vtime:
-#endif
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF
la %r2,SP_PTREGS(%r15) # address of register-save area
@@ -739,19 +721,17 @@ io_notify_resume:
*/
.globl ext_int_handler
ext_int_handler:
- STORE_TIMER __LC_ASYNC_ENTER_TIMER
+ stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz ext_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
ext_no_vtime:
-#endif
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF
la %r2,SP_PTREGS(%r15) # address of register-save area
@@ -773,7 +753,6 @@ mcck_int_handler:
la %r12,__LC_MCK_OLD_PSW
tm __LC_MCCK_CODE,0x80 # system damage?
jo mcck_int_main # yes -> rest of mcck code invalid
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
la %r14,4095
mvc __LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER
mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
@@ -791,9 +770,7 @@ mcck_int_handler:
la %r14,__LC_LAST_UPDATE_TIMER
0: spt 0(%r14)
mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14)
-1:
-#endif
- tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
+1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
jno mcck_int_main # no -> skip cleanup critical
tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
jnz mcck_int_main # from user -> load kernel stack
@@ -809,7 +786,6 @@ mcck_int_main:
jz 0f
lg %r15,__LC_PANIC_STACK # load panic stack
0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
jno mcck_no_vtime # no -> no timer update
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -818,7 +794,6 @@ mcck_int_main:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
mcck_no_vtime:
-#endif
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
la %r2,SP_PTREGS(%r15) # load pt_regs
brasl %r14,s390_do_machine_check
@@ -839,14 +814,11 @@ mcck_return:
mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
jno 0f
stpt __LC_EXIT_TIMER
-0:
-#endif
- lpswe __LC_RETURN_MCCK_PSW # back to caller
+0: lpswe __LC_RETURN_MCCK_PSW # back to caller
/*
* Restart interruption handler, kick starter for additional CPUs
@@ -964,13 +936,11 @@ cleanup_system_call:
j 1f
0: la %r12,__LC_SAVE_AREA+64
1:
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
jh 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
jhe cleanup_vtime
-#endif
clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
jh 0f
mvc __LC_SAVE_AREA(32),0(%r12)
@@ -981,7 +951,6 @@ cleanup_system_call:
lg %r12,__LC_SAVE_AREA+96 # argh
stg %r15,24(%r12)
llgh %r7,__LC_SVC_INT_CODE
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
cleanup_vtime:
clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
jhe cleanup_stime
@@ -992,18 +961,15 @@ cleanup_stime:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
cleanup_update:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-#endif
mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
la %r12,__LC_RETURN_PSW
br %r14
cleanup_system_call_insn:
.quad sysc_saveall
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
.quad system_call
.quad sysc_vtime
.quad sysc_stime
.quad sysc_update
-#endif
cleanup_sysc_return:
mvc __LC_RETURN_PSW(8),0(%r12)
@@ -1014,11 +980,9 @@ cleanup_sysc_return:
cleanup_sysc_leave:
clc 8(8,%r12),BASED(cleanup_sysc_leave_insn)
je 2f
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8)
je 2f
-#endif
mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
cghi %r12,__LC_MCK_OLD_PSW
jne 0f
@@ -1031,9 +995,7 @@ cleanup_sysc_leave:
br %r14
cleanup_sysc_leave_insn:
.quad sysc_done - 4
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
.quad sysc_done - 8
-#endif
cleanup_io_return:
mvc __LC_RETURN_PSW(8),0(%r12)
@@ -1044,11 +1006,9 @@ cleanup_io_return:
cleanup_io_leave:
clc 8(8,%r12),BASED(cleanup_io_leave_insn)
je 2f
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
clc 8(8,%r12),BASED(cleanup_io_leave_insn+8)
je 2f
-#endif
mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
cghi %r12,__LC_MCK_OLD_PSW
jne 0f
@@ -1061,9 +1021,7 @@ cleanup_io_leave:
br %r14
cleanup_io_leave_insn:
.quad io_done - 4
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
.quad io_done - 8
-#endif
/*
* Integer constants
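
Editorial note: with CONFIG_VIRT_CPU_ACCOUNTING now unconditional on s390, the #ifdef blocks above go away and the entry paths sample the CPU timer directly (stpt) so that UPDATE_VTIME can split the elapsed ticks into user and system time. A minimal userspace sketch of that bookkeeping, assuming a down-counting timer and made-up field names rather than the real lowcore layout:

#include <stdio.h>

/* Hypothetical model of the timer fields UPDATE_VTIME works on. */
struct vtime_model {
	unsigned long long user;	/* accumulated user time */
	unsigned long long system;	/* accumulated system time */
	unsigned long long exit_timer;	/* sample taken when last leaving to user space */
	unsigned long long last_update;	/* sample taken at the previous accounting */
};

/* The CPU timer counts down, so "earlier sample - later sample" is the
 * elapsed amount.  On system-call entry the span since we last left the
 * kernel was user time; the span before that was system time. */
static void account_entry(struct vtime_model *vt, unsigned long long enter_timer)
{
	vt->user += vt->exit_timer - enter_timer;
	vt->system += vt->last_update - vt->exit_timer;
	vt->last_update = enter_timer;
}

int main(void)
{
	struct vtime_model vt = { 0, 0, 900, 1000 };

	account_entry(&vt, 850);	/* timer ran from 900 down to 850 in user space */
	printf("user=%llu system=%llu\n", vt.user, vt.system);
	return 0;
}
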
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 83477c7dc743..ec7e35f6055b 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -461,6 +461,55 @@ start:
.byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
.byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
+#
+# startup-code at 0x10000, running in absolute addressing mode
+# this is called either by the ipl loader or directly by PSW restart
+# or linload or SALIPL
+#
+ .org 0x10000
+startup:basr %r13,0 # get base
+.LPG0:
+
+#ifndef CONFIG_MARCH_G5
+ # check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10}
+ stidp __LC_CPUID # store cpuid
+ lhi %r0,(3f-2f) / 2
+ la %r1,2f-.LPG0(%r13)
+0: clc __LC_CPUID+4(2),0(%r1)
+ jne 3f
+ lpsw 1f-.LPG0(13) # machine type not good enough, crash
+ .align 16
+1: .long 0x000a0000,0x00000000
+2:
+#if defined(CONFIG_MARCH_Z10)
+ .short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086, 0x2094, 0x2096
+#elif defined(CONFIG_MARCH_Z9_109)
+ .short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086
+#elif defined(CONFIG_MARCH_Z990)
+ .short 0x9672, 0x2064, 0x2066
+#elif defined(CONFIG_MARCH_Z900)
+ .short 0x9672
+#endif
+3: la %r1,2(%r1)
+ brct %r0,0b
+#endif
+
+ l %r13,0f-.LPG0(%r13)
+ b 0(%r13)
+0: .long startup_continue
+
+#
+# params at 10400 (setup.h)
+#
+ .org PARMAREA
+ .long 0,0 # IPL_DEVICE
+ .long 0,0 # INITRD_START
+ .long 0,0 # INITRD_SIZE
+
+ .org COMMAND_LINE
+ .byte "root=/dev/ram0 ro"
+ .byte 0
+
#ifdef CONFIG_64BIT
#include "head64.S"
#else
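
Editorial note: the startup code added above scans a table of machine types that are too old for the configured -march level and, on a match, loads a disabled-wait PSW. A rough userspace model of that table scan, assuming a CONFIG_MARCH_Z9_109 build; the machine value below is made up:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* machines that are too old for a -march=z9-109 kernel */
	static const unsigned short too_old[] = {
		0x9672, 0x2064, 0x2066, 0x2084, 0x2086
	};
	unsigned short machine = 0x2084;	/* pretend result of stidp */
	size_t i;

	for (i = 0; i < sizeof(too_old) / sizeof(too_old[0]); i++) {
		if (machine == too_old[i]) {
			/* head.S loads a disabled-wait PSW at this point */
			fprintf(stderr, "machine type not good enough, crash\n");
			exit(1);
		}
	}
	printf("machine 0x%04x can run this kernel\n", machine);
	return 0;
}
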
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index a816e2de32b9..db476d114caa 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -10,34 +10,13 @@
*
*/
-#
-# startup-code at 0x10000, running in absolute addressing mode
-# this is called either by the ipl loader or directly by PSW restart
-# or linload or SALIPL
-#
- .org 0x10000
-startup:basr %r13,0 # get base
-.LPG0: l %r13,0f-.LPG0(%r13)
- b 0(%r13)
-0: .long startup_continue
-
-#
-# params at 10400 (setup.h)
-#
- .org PARMAREA
- .long 0,0 # IPL_DEVICE
- .long 0,0 # INITRD_START
- .long 0,0 # INITRD_SIZE
-
- .org COMMAND_LINE
- .byte "root=/dev/ram0 ro"
- .byte 0
-
.org 0x11000
startup_continue:
basr %r13,0 # get base
-.LPG1: mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
+.LPG1:
+
+ mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
# move IPL device to lowcore
@@ -50,7 +29,6 @@ startup_continue:
ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
st %r15,__LC_KERNEL_STACK # set end of kernel stack
ahi %r15,-96
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
#
# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
# and create a kernel NSS if the SAVESYS= parm is defined
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 1d06961e87b3..3ccd36b24b8f 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -10,29 +10,6 @@
*
*/
-#
-# startup-code at 0x10000, running in absolute addressing mode
-# this is called either by the ipl loader or directly by PSW restart
-# or linload or SALIPL
-#
- .org 0x10000
-startup:basr %r13,0 # get base
-.LPG0: l %r13,0f-.LPG0(%r13)
- b 0(%r13)
-0: .long startup_continue
-
-#
-# params at 10400 (setup.h)
-#
- .org PARMAREA
- .quad 0 # IPL_DEVICE
- .quad 0 # INITRD_START
- .quad 0 # INITRD_SIZE
-
- .org COMMAND_LINE
- .byte "root=/dev/ram0 ro"
- .byte 0
-
.org 0x11000
startup_continue:
@@ -119,7 +96,6 @@ startup_continue:
aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
stg %r15,__LC_KERNEL_STACK # set end of kernel stack
aghi %r15,-160
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
#
# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
# and create a kernel NSS if the SAVESYS= parm is defined
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
new file mode 100644
index 000000000000..397d131a345f
--- /dev/null
+++ b/arch/s390/kernel/mcount.S
@@ -0,0 +1,56 @@
+/*
+ * Copyright IBM Corp. 2008
+ *
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *
+ */
+
+#ifndef CONFIG_64BIT
+.globl _mcount
+_mcount:
+ stm %r0,%r5,8(%r15)
+ st %r14,56(%r15)
+ lr %r1,%r15
+ ahi %r15,-96
+ l %r3,100(%r15)
+ la %r2,0(%r14)
+ st %r1,0(%r15)
+ la %r3,0(%r3)
+ bras %r14,0f
+ .long ftrace_trace_function
+0: l %r14,0(%r14)
+ l %r14,0(%r14)
+ basr %r14,%r14
+ ahi %r15,96
+ lm %r0,%r5,8(%r15)
+ l %r14,56(%r15)
+ br %r14
+
+.globl ftrace_stub
+ftrace_stub:
+ br %r14
+
+#else /* CONFIG_64BIT */
+
+.globl _mcount
+_mcount:
+ stmg %r0,%r5,16(%r15)
+ stg %r14,112(%r15)
+ lgr %r1,%r15
+ aghi %r15,-160
+ stg %r1,0(%r15)
+ lgr %r2,%r14
+ lg %r3,168(%r15)
+ larl %r14,ftrace_trace_function
+ lg %r14,0(%r14)
+ basr %r14,%r14
+ aghi %r15,160
+ lmg %r0,%r5,16(%r15)
+ lg %r14,112(%r15)
+ br %r14
+
+.globl ftrace_stub
+ftrace_stub:
+ br %r14
+
+#endif /* CONFIG_64BIT */
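
Editorial note: the new _mcount above just saves the clobbered registers, loads the global ftrace_trace_function pointer and calls it with the traced address and its call site. A hedged userspace model of that indirection; apart from the ftrace_stub default the names below are stand-ins, not the kernel interface:

#include <stdio.h>

typedef void (*trace_fn)(unsigned long ip, unsigned long parent_ip);

static void ftrace_stub(unsigned long ip, unsigned long parent_ip)
{
	(void)ip;
	(void)parent_ip;	/* default tracer does nothing */
}

static trace_fn ftrace_trace_function = ftrace_stub;

static void my_tracer(unsigned long ip, unsigned long parent_ip)
{
	printf("traced call to %#lx from %#lx\n", ip, parent_ip);
}

/* what _mcount does after saving registers */
static void mcount_model(unsigned long ip, unsigned long parent_ip)
{
	ftrace_trace_function(ip, parent_ip);
}

int main(void)
{
	mcount_model(0x1000, 0x2000);		/* stub installed: silent */
	ftrace_trace_function = my_tracer;	/* ftrace installs a real tracer */
	mcount_model(0x1000, 0x2000);
	return 0;
}
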
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
new file mode 100644
index 000000000000..82c1872cfe80
--- /dev/null
+++ b/arch/s390/kernel/processor.c
@@ -0,0 +1,98 @@
+/*
+ * arch/s390/kernel/processor.c
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cpu"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+
+#include <asm/elf.h>
+#include <asm/lowcore.h>
+#include <asm/param.h>
+
+void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo)
+{
+ pr_info("Processor %d started, address %d, identification %06X\n",
+ cpuinfo->cpu_nr, cpuinfo->cpu_addr, cpuinfo->cpu_id.ident);
+}
+
+/*
+ * show_cpuinfo - Get information on one CPU for use by procfs.
+ */
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ static const char *hwcap_str[8] = {
+ "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
+ "edat"
+ };
+ struct cpuinfo_S390 *cpuinfo;
+ unsigned long n = (unsigned long) v - 1;
+ int i;
+
+ s390_adjust_jiffies();
+ preempt_disable();
+ if (!n) {
+ seq_printf(m, "vendor_id : IBM/S390\n"
+ "# processors : %i\n"
+ "bogomips per cpu: %lu.%02lu\n",
+ num_online_cpus(), loops_per_jiffy/(500000/HZ),
+ (loops_per_jiffy/(5000/HZ))%100);
+ seq_puts(m, "features\t: ");
+ for (i = 0; i < 8; i++)
+ if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
+ seq_printf(m, "%s ", hwcap_str[i]);
+ seq_puts(m, "\n");
+ }
+
+ if (cpu_online(n)) {
+#ifdef CONFIG_SMP
+ if (smp_processor_id() == n)
+ cpuinfo = &S390_lowcore.cpu_data;
+ else
+ cpuinfo = &lowcore_ptr[n]->cpu_data;
+#else
+ cpuinfo = &S390_lowcore.cpu_data;
+#endif
+ seq_printf(m, "processor %li: "
+ "version = %02X, "
+ "identification = %06X, "
+ "machine = %04X\n",
+ n, cpuinfo->cpu_id.version,
+ cpuinfo->cpu_id.ident,
+ cpuinfo->cpu_id.machine);
+ }
+ preempt_enable();
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
+};
+
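
Editorial note: cpuinfo_op above is a standard seq_file iterator: c_start() hands back the cpu index plus one as an opaque cursor (so NULL can mean "end"), c_next() advances it, and show_cpuinfo() prints the header once and one line per online cpu. A compressed userspace model of that start/next/show loop; NR_CPUS and the printed fields are placeholders:

#include <stdio.h>

#define NR_CPUS 4

static void *c_start(long *pos)
{
	return *pos < NR_CPUS ? (void *)(*pos + 1) : NULL;
}

static void *c_next(void *v, long *pos)
{
	(void)v;
	++*pos;
	return c_start(pos);
}

static int show_cpuinfo(void *v)
{
	unsigned long n = (unsigned long)v - 1;

	if (!n)
		printf("vendor_id       : IBM/S390\n");
	printf("processor %lu: version = %02X\n", n, 0);
	return 0;
}

int main(void)
{
	long pos = 0;
	void *v;

	for (v = c_start(&pos); v; v = c_next(v, &pos))
		show_cpuinfo(v);
	return 0;
}
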
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 38ff2bce1203..75c496f4f16d 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -204,7 +204,6 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
- struct user *dummy = NULL;
addr_t tmp, mask;
/*
@@ -213,8 +212,8 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
*/
mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
- if (addr >= (addr_t) &dummy->regs.acrs &&
- addr < (addr_t) &dummy->regs.orig_gpr2)
+ if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
+ addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
mask = 3;
#endif
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
@@ -312,7 +311,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
static int
poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
- struct user *dummy = NULL;
addr_t mask;
/*
@@ -321,8 +319,8 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
*/
mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
- if (addr >= (addr_t) &dummy->regs.acrs &&
- addr < (addr_t) &dummy->regs.orig_gpr2)
+ if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
+ addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
mask = 3;
#endif
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
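
Editorial note: the ptrace change above drops the dummy pointer and takes the member addresses through a (struct user *) NULL cast instead, which is the classic open-coded offsetof(). A tiny sketch of the equivalence with a stand-in structure (the real struct user layout is not reproduced here):

#include <stdio.h>
#include <stddef.h>

struct user_model {			/* stand-in, not the real struct user */
	long gprs[16];
	long acrs[16];
	long orig_gpr2;
};

int main(void)
{
	/* same idiom as the ptrace code: member address through a NULL base */
	size_t open_coded = (size_t) &((struct user_model *) NULL)->acrs;
	size_t standard = offsetof(struct user_model, acrs);

	printf("open-coded %zu, offsetof() %zu\n", open_coded, standard);
	return 0;
}
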
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 48238a114ce9..46b90cb03707 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -14,6 +14,7 @@
#include <asm/delay.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
+#include <asm/ftrace.h>
#ifdef CONFIG_IP_MULTICAST
#include <net/arp.h>
#endif
@@ -43,3 +44,7 @@ EXPORT_SYMBOL(csum_fold);
EXPORT_SYMBOL(console_mode);
EXPORT_SYMBOL(console_devno);
EXPORT_SYMBOL(console_irq);
+
+#ifdef CONFIG_FUNCTION_TRACER
+EXPORT_SYMBOL(_mcount);
+#endif
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 400b040df7fa..b7a1efd5522c 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -14,6 +14,9 @@
* This file handles the architecture-dependent parts of initialization
*/
+#define KMSG_COMPONENT "setup"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -32,7 +35,6 @@
#include <linux/bootmem.h>
#include <linux/root_dev.h>
#include <linux/console.h>
-#include <linux/seq_file.h>
#include <linux/kernel_stat.h>
#include <linux/device.h>
#include <linux/notifier.h>
@@ -291,8 +293,8 @@ unsigned int switch_amode = 0;
#endif
EXPORT_SYMBOL_GPL(switch_amode);
-static void set_amode_and_uaccess(unsigned long user_amode,
- unsigned long user32_amode)
+static int set_amode_and_uaccess(unsigned long user_amode,
+ unsigned long user32_amode)
{
psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
@@ -309,11 +311,11 @@ static void set_amode_and_uaccess(unsigned long user_amode,
PSW_MASK_MCHECK | PSW_DEFAULT_KEY;
if (MACHINE_HAS_MVCOS) {
- printk("mvcos available.\n");
memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
+ return 1;
} else {
- printk("mvcos not available.\n");
memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
+ return 0;
}
}
@@ -328,9 +330,10 @@ static int __init early_parse_switch_amode(char *p)
early_param("switch_amode", early_parse_switch_amode);
#else /* CONFIG_S390_SWITCH_AMODE */
-static inline void set_amode_and_uaccess(unsigned long user_amode,
- unsigned long user32_amode)
+static inline int set_amode_and_uaccess(unsigned long user_amode,
+ unsigned long user32_amode)
{
+ return 0;
}
#endif /* CONFIG_S390_SWITCH_AMODE */
@@ -355,11 +358,20 @@ early_param("noexec", early_parse_noexec);
static void setup_addressing_mode(void)
{
if (s390_noexec) {
- printk("S390 execute protection active, ");
- set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY);
+ if (set_amode_and_uaccess(PSW_ASC_SECONDARY,
+ PSW32_ASC_SECONDARY))
+ pr_info("Execute protection active, "
+ "mvcos available\n");
+ else
+ pr_info("Execute protection active, "
+ "mvcos not available\n");
} else if (switch_amode) {
- printk("S390 address spaces switched, ");
- set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY);
+ if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
+ pr_info("Address spaces switched, "
+ "mvcos available\n");
+ else
+ pr_info("Address spaces switched, "
+ "mvcos not available\n");
}
#ifdef CONFIG_TRACE_IRQFLAGS
sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
@@ -572,15 +584,15 @@ setup_memory(void)
start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
if (start + INITRD_SIZE > memory_end) {
- printk("initrd extends beyond end of memory "
- "(0x%08lx > 0x%08lx)\n"
+ pr_err("initrd extends beyond end of "
+ "memory (0x%08lx > 0x%08lx) "
"disabling initrd\n",
start + INITRD_SIZE, memory_end);
INITRD_START = INITRD_SIZE = 0;
} else {
- printk("Moving initrd (0x%08lx -> 0x%08lx, "
- "size: %ld)\n",
- INITRD_START, start, INITRD_SIZE);
+ pr_info("Moving initrd (0x%08lx -> "
+ "0x%08lx, size: %ld)\n",
+ INITRD_START, start, INITRD_SIZE);
memmove((void *) start, (void *) INITRD_START,
INITRD_SIZE);
INITRD_START = start;
@@ -642,8 +654,9 @@ setup_memory(void)
initrd_start = INITRD_START;
initrd_end = initrd_start + INITRD_SIZE;
} else {
- printk("initrd extends beyond end of memory "
- "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ pr_err("initrd extends beyond end of "
+ "memory (0x%08lx > 0x%08lx) "
+ "disabling initrd\n",
initrd_start + INITRD_SIZE, memory_end);
initrd_start = initrd_end = 0;
}
@@ -651,23 +664,6 @@ setup_memory(void)
#endif
}
-static int __init __stfle(unsigned long long *list, int doublewords)
-{
- typedef struct { unsigned long long _[doublewords]; } addrtype;
- register unsigned long __nr asm("0") = doublewords - 1;
-
- asm volatile(".insn s,0xb2b00000,%0" /* stfle */
- : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc");
- return __nr + 1;
-}
-
-int __init stfle(unsigned long long *list, int doublewords)
-{
- if (!(stfl() & (1UL << 24)))
- return -EOPNOTSUPP;
- return __stfle(list, doublewords);
-}
-
/*
* Setup hardware capabilities.
*/
@@ -739,8 +735,13 @@ static void __init setup_hwcaps(void)
strcpy(elf_platform, "z990");
break;
case 0x2094:
+ case 0x2096:
strcpy(elf_platform, "z9-109");
break;
+ case 0x2097:
+ case 0x2098:
+ strcpy(elf_platform, "z10");
+ break;
}
}
@@ -752,25 +753,34 @@ static void __init setup_hwcaps(void)
void __init
setup_arch(char **cmdline_p)
{
+ /* set up preferred console */
+ add_preferred_console("ttyS", 0, NULL);
+
/*
* print what head.S has found out about the machine
*/
#ifndef CONFIG_64BIT
- printk((MACHINE_IS_VM) ?
- "We are running under VM (31 bit mode)\n" :
- "We are running native (31 bit mode)\n");
- printk((MACHINE_HAS_IEEE) ?
- "This machine has an IEEE fpu\n" :
- "This machine has no IEEE fpu\n");
+ if (MACHINE_IS_VM)
+ pr_info("Linux is running as a z/VM "
+ "guest operating system in 31-bit mode\n");
+ else
+ pr_info("Linux is running natively in 31-bit mode\n");
+ if (MACHINE_HAS_IEEE)
+ pr_info("The hardware system has IEEE compatible "
+ "floating point units\n");
+ else
+ pr_info("The hardware system has no IEEE compatible "
+ "floating point units\n");
#else /* CONFIG_64BIT */
if (MACHINE_IS_VM)
- printk("We are running under VM (64 bit mode)\n");
+ pr_info("Linux is running as a z/VM "
+ "guest operating system in 64-bit mode\n");
else if (MACHINE_IS_KVM) {
- printk("We are running under KVM (64 bit mode)\n");
+ pr_info("Linux is running under KVM in 64-bit mode\n");
add_preferred_console("hvc", 0, NULL);
s390_virtio_console_init();
} else
- printk("We are running native (64 bit mode)\n");
+ pr_info("Linux is running natively in 64-bit mode\n");
#endif /* CONFIG_64BIT */
/* Have one command line that is parsed and saved in /proc/cmdline */
@@ -818,90 +828,3 @@ setup_arch(char **cmdline_p)
/* Setup zfcpdump support */
setup_zfcpdump(console_devno);
}
-
-void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo)
-{
- printk(KERN_INFO "cpu %d "
-#ifdef CONFIG_SMP
- "phys_idx=%d "
-#endif
- "vers=%02X ident=%06X machine=%04X unused=%04X\n",
- cpuinfo->cpu_nr,
-#ifdef CONFIG_SMP
- cpuinfo->cpu_addr,
-#endif
- cpuinfo->cpu_id.version,
- cpuinfo->cpu_id.ident,
- cpuinfo->cpu_id.machine,
- cpuinfo->cpu_id.unused);
-}
-
-/*
- * show_cpuinfo - Get information on one CPU for use by procfs.
- */
-
-static int show_cpuinfo(struct seq_file *m, void *v)
-{
- static const char *hwcap_str[8] = {
- "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
- "edat"
- };
- struct cpuinfo_S390 *cpuinfo;
- unsigned long n = (unsigned long) v - 1;
- int i;
-
- s390_adjust_jiffies();
- preempt_disable();
- if (!n) {
- seq_printf(m, "vendor_id : IBM/S390\n"
- "# processors : %i\n"
- "bogomips per cpu: %lu.%02lu\n",
- num_online_cpus(), loops_per_jiffy/(500000/HZ),
- (loops_per_jiffy/(5000/HZ))%100);
- seq_puts(m, "features\t: ");
- for (i = 0; i < 8; i++)
- if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
- seq_printf(m, "%s ", hwcap_str[i]);
- seq_puts(m, "\n");
- }
-
- if (cpu_online(n)) {
-#ifdef CONFIG_SMP
- if (smp_processor_id() == n)
- cpuinfo = &S390_lowcore.cpu_data;
- else
- cpuinfo = &lowcore_ptr[n]->cpu_data;
-#else
- cpuinfo = &S390_lowcore.cpu_data;
-#endif
- seq_printf(m, "processor %li: "
- "version = %02X, "
- "identification = %06X, "
- "machine = %04X\n",
- n, cpuinfo->cpu_id.version,
- cpuinfo->cpu_id.ident,
- cpuinfo->cpu_id.machine);
- }
- preempt_enable();
- return 0;
-}
-
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
- return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL;
-}
-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-{
- ++*pos;
- return c_start(m, pos);
-}
-static void c_stop(struct seq_file *m, void *v)
-{
-}
-const struct seq_operations cpuinfo_op = {
- .start = c_start,
- .next = c_next,
- .stop = c_stop,
- .show = show_cpuinfo,
-};
-
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index b5595688a477..6fc78541dc57 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -20,6 +20,9 @@
* cpu_number_map in other architectures.
*/
+#define KMSG_COMPONENT "cpu"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
@@ -77,159 +80,6 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
static void smp_ext_bitcall(int, ec_bit_sig);
-/*
- * Structure and data for __smp_call_function_map(). This is designed to
- * minimise static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
- void (*func) (void *info);
- void *info;
- cpumask_t started;
- cpumask_t finished;
- int wait;
-};
-
-static struct call_data_struct *call_data;
-
-/*
- * 'Call function' interrupt callback
- */
-static void do_call_function(void)
-{
- void (*func) (void *info) = call_data->func;
- void *info = call_data->info;
- int wait = call_data->wait;
-
- cpu_set(smp_processor_id(), call_data->started);
- (*func)(info);
- if (wait)
- cpu_set(smp_processor_id(), call_data->finished);;
-}
-
-static void __smp_call_function_map(void (*func) (void *info), void *info,
- int wait, cpumask_t map)
-{
- struct call_data_struct data;
- int cpu, local = 0;
-
- /*
- * Can deadlock when interrupts are disabled or if in wrong context.
- */
- WARN_ON(irqs_disabled() || in_irq());
-
- /*
- * Check for local function call. We have to have the same call order
- * as in on_each_cpu() because of machine_restart_smp().
- */
- if (cpu_isset(smp_processor_id(), map)) {
- local = 1;
- cpu_clear(smp_processor_id(), map);
- }
-
- cpus_and(map, map, cpu_online_map);
- if (cpus_empty(map))
- goto out;
-
- data.func = func;
- data.info = info;
- data.started = CPU_MASK_NONE;
- data.wait = wait;
- if (wait)
- data.finished = CPU_MASK_NONE;
-
- call_data = &data;
-
- for_each_cpu_mask(cpu, map)
- smp_ext_bitcall(cpu, ec_call_function);
-
- /* Wait for response */
- while (!cpus_equal(map, data.started))
- cpu_relax();
- if (wait)
- while (!cpus_equal(map, data.finished))
- cpu_relax();
-out:
- if (local) {
- local_irq_disable();
- func(info);
- local_irq_enable();
- }
-}
-
-/*
- * smp_call_function:
- * @func: the function to run; this must be fast and non-blocking
- * @info: an arbitrary pointer to pass to the function
- * @wait: if true, wait (atomically) until function has completed on other CPUs
- *
- * Run a function on all other CPUs.
- *
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler or from a bottom half.
- */
-int smp_call_function(void (*func) (void *info), void *info, int wait)
-{
- cpumask_t map;
-
- spin_lock(&call_lock);
- map = cpu_online_map;
- cpu_clear(smp_processor_id(), map);
- __smp_call_function_map(func, info, wait, map);
- spin_unlock(&call_lock);
- return 0;
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/*
- * smp_call_function_single:
- * @cpu: the CPU where func should run
- * @func: the function to run; this must be fast and non-blocking
- * @info: an arbitrary pointer to pass to the function
- * @wait: if true, wait (atomically) until function has completed on other CPUs
- *
- * Run a function on one processor.
- *
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler or from a bottom half.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int wait)
-{
- spin_lock(&call_lock);
- __smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu));
- spin_unlock(&call_lock);
- return 0;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on. Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
- int wait)
-{
- spin_lock(&call_lock);
- cpu_clear(smp_processor_id(), mask);
- __smp_call_function_map(func, info, wait, mask);
- spin_unlock(&call_lock);
- return 0;
-}
-EXPORT_SYMBOL(smp_call_function_mask);
-
void smp_send_stop(void)
{
int cpu, rc;
@@ -271,7 +121,10 @@ static void do_ext_call_interrupt(__u16 code)
bits = xchg(&S390_lowcore.ext_call_fast, 0);
if (test_bit(ec_call_function, &bits))
- do_call_function();
+ generic_smp_call_function_interrupt();
+
+ if (test_bit(ec_call_function_single, &bits))
+ generic_smp_call_function_single_interrupt();
}
/*
@@ -288,6 +141,19 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
udelay(10);
}
+void arch_send_call_function_ipi(cpumask_t mask)
+{
+ int cpu;
+
+ for_each_cpu_mask(cpu, mask)
+ smp_ext_bitcall(cpu, ec_call_function);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ smp_ext_bitcall(cpu, ec_call_function_single);
+}
+
#ifndef CONFIG_64BIT
/*
* this function sends a 'purge tlb' signal to another CPU.
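
Editorial note: with the private call_function machinery gone, the architecture only signals the generic helpers through the arch_send_call_function_*() functions added above: each request is a bit in the per-cpu ext_call_fast word, set by smp_ext_bitcall() and drained by do_ext_call_interrupt(). A small single-threaded model of that bit dispatch, with placeholder prints instead of the generic_smp_call_function_*() calls:

#include <stdio.h>

enum { ec_call_function, ec_call_function_single };

static unsigned long ext_call_fast;		/* per-cpu word in the kernel */

static void smp_ext_bitcall_model(int sig)
{
	ext_call_fast |= 1UL << sig;		/* kernel uses an atomic op plus sigp */
}

static void do_ext_call_interrupt_model(void)
{
	unsigned long bits = __atomic_exchange_n(&ext_call_fast, 0,
						 __ATOMIC_SEQ_CST);

	if (bits & (1UL << ec_call_function))
		printf("run queued multi-CPU function calls\n");
	if (bits & (1UL << ec_call_function_single))
		printf("run queued single-CPU function call\n");
}

int main(void)
{
	smp_ext_bitcall_model(ec_call_function_single);
	do_ext_call_interrupt_model();
	return 0;
}
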
@@ -388,8 +254,8 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
if (ipl_info.type != IPL_TYPE_FCP_DUMP)
return;
if (cpu >= NR_CPUS) {
- printk(KERN_WARNING "Registers for cpu %i not saved since dump "
- "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
+ pr_warning("CPU %i exceeds the maximum %i and is excluded from "
+ "the dump\n", cpu, NR_CPUS - 1);
return;
}
zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
@@ -562,7 +428,7 @@ static void __init smp_detect_cpus(void)
}
out:
kfree(info);
- printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus);
+ pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
get_online_cpus();
__smp_rescan_cpus();
put_online_cpus();
@@ -578,19 +444,17 @@ int __cpuinit start_secondary(void *cpuvoid)
preempt_disable();
/* Enable TOD clock interrupts on the secondary cpu. */
init_cpu_timer();
-#ifdef CONFIG_VIRT_TIMER
/* Enable cpu timer interrupts on the secondary cpu. */
init_cpu_vtimer();
-#endif
/* Enable pfault pseudo page faults on this cpu. */
pfault_init();
/* call cpu notifiers */
notify_cpu_starting(smp_processor_id());
/* Mark this cpu as online */
- spin_lock(&call_lock);
+ ipi_call_lock();
cpu_set(smp_processor_id(), cpu_online_map);
- spin_unlock(&call_lock);
+ ipi_call_unlock();
/* Switch on interrupts */
local_irq_enable();
/* Print info about this processor */
@@ -639,18 +503,15 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
save_area = get_zeroed_page(GFP_KERNEL);
if (!save_area)
- goto out_save_area;
+ goto out;
lowcore->extended_save_area_addr = (u32) save_area;
}
#endif
lowcore_ptr[cpu] = lowcore;
return 0;
-#ifndef CONFIG_64BIT
-out_save_area:
- free_page(panic_stack);
-#endif
out:
+ free_page(panic_stack);
free_pages(async_stack, ASYNC_ORDER);
free_pages((unsigned long) lowcore, lc_order);
return -ENOMEM;
@@ -690,12 +551,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
cpu, sigp_set_prefix);
- if (ccode) {
- printk("sigp_set_prefix failed for cpu %d "
- "with condition code %d\n",
- (int) cpu, (int) ccode);
+ if (ccode)
return -EIO;
- }
idle = current_set[cpu];
cpu_lowcore = lowcore_ptr[cpu];
@@ -778,7 +635,7 @@ void __cpu_die(unsigned int cpu)
while (!smp_cpu_not_running(cpu))
cpu_relax();
smp_free_lowcore(cpu);
- printk(KERN_INFO "Processor %d spun down\n", cpu);
+ pr_info("Processor %d stopped\n", cpu);
}
void cpu_die(void)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index eccefbbff887..5be981a36c3e 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -12,6 +12,9 @@
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
*/
+#define KMSG_COMPONENT "time"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -20,6 +23,8 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <linux/stop_machine.h>
#include <linux/time.h>
#include <linux/sysdev.h>
#include <linux/delay.h>
@@ -36,6 +41,7 @@
#include <asm/delay.h>
#include <asm/s390_ext.h>
#include <asm/div64.h>
+#include <asm/vdso.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/timer.h>
@@ -223,6 +229,36 @@ static struct clocksource clocksource_tod = {
};
+void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
+{
+ if (clock != &clocksource_tod)
+ return;
+
+ /* Make userspace gettimeofday spin until we're done. */
+ ++vdso_data->tb_update_count;
+ smp_wmb();
+ vdso_data->xtime_tod_stamp = clock->cycle_last;
+ vdso_data->xtime_clock_sec = xtime.tv_sec;
+ vdso_data->xtime_clock_nsec = xtime.tv_nsec;
+ vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
+ vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
+ smp_wmb();
+ ++vdso_data->tb_update_count;
+}
+
+extern struct timezone sys_tz;
+
+void update_vsyscall_tz(void)
+{
+ /* Make userspace gettimeofday spin until we're done. */
+ ++vdso_data->tb_update_count;
+ smp_wmb();
+ vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
+ vdso_data->tz_dsttime = sys_tz.tz_dsttime;
+ smp_wmb();
+ ++vdso_data->tb_update_count;
+}
+
/*
* Initialize the TOD clock and the CPU timer of
* the boot cpu.
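
Editorial note: update_vsyscall() above bumps tb_update_count to an odd value before touching the vdso_data fields and back to even afterwards, with write barriers in between; the vDSO code spins while the count is odd and retries if it changed underneath it. A minimal userspace sketch of that reader/writer protocol (field names abbreviated, single-threaded demo):

#include <stdio.h>

struct vdso_data_model {
	unsigned long seq;	/* tb_update_count */
	long sec, nsec;		/* xtime snapshot */
};

static void writer_update(struct vdso_data_model *d, long sec, long nsec)
{
	d->seq++;					/* now odd: readers spin */
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* kernel uses smp_wmb() */
	d->sec = sec;
	d->nsec = nsec;
	__atomic_thread_fence(__ATOMIC_RELEASE);
	d->seq++;					/* even again: snapshot consistent */
}

static void reader_gettime(const struct vdso_data_model *d, long *sec, long *nsec)
{
	unsigned long start;

	do {
		do {
			start = d->seq;
		} while (start & 1);	/* wait for a pending update to finish */
		*sec = d->sec;
		*nsec = d->nsec;
	} while (start != d->seq);	/* retry if an update slipped in */
}

int main(void)
{
	struct vdso_data_model d = { 0, 0, 0 };
	long sec, nsec;

	writer_update(&d, 1230000000, 500);
	reader_gettime(&d, &sec, &nsec);
	printf("%ld.%09ld\n", sec, nsec);
	return 0;
}
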
@@ -253,10 +289,8 @@ void __init time_init(void)
/* Enable TOD clock interrupts on the boot cpu. */
init_cpu_timer();
-
-#ifdef CONFIG_VIRT_TIMER
+ /* Enable cpu timer interrupts on the boot cpu. */
vtime_init();
-#endif
}
/*
@@ -288,8 +322,8 @@ static unsigned long long adjust_time(unsigned long long old,
}
sched_clock_base_cc += delta;
if (adjust.offset != 0) {
- printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
- adjust.offset);
+ pr_notice("The ETR interface has adjusted the clock "
+ "by %li microseconds\n", adjust.offset);
adjust.modes = ADJ_OFFSET_SINGLESHOT;
do_adjtimex(&adjust);
}
@@ -360,6 +394,15 @@ static void enable_sync_clock(void)
atomic_set_mask(0x80000000, sw_ptr);
}
+/* Single threaded workqueue used for etr and stp sync events */
+static struct workqueue_struct *time_sync_wq;
+
+static void __init time_init_wq(void)
+{
+ if (!time_sync_wq)
+ time_sync_wq = create_singlethread_workqueue("timesync");
+}
+
/*
* External Time Reference (ETR) code.
*/
@@ -425,6 +468,7 @@ static struct timer_list etr_timer;
static void etr_timeout(unsigned long dummy);
static void etr_work_fn(struct work_struct *work);
+static DEFINE_MUTEX(etr_work_mutex);
static DECLARE_WORK(etr_work, etr_work_fn);
/*
@@ -440,8 +484,8 @@ static void etr_reset(void)
etr_tolec = get_clock();
set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
} else if (etr_port0_online || etr_port1_online) {
- printk(KERN_WARNING "Running on non ETR capable "
- "machine, only local mode available.\n");
+ pr_warning("The real or virtual hardware system does "
+ "not provide an ETR interface\n");
etr_port0_online = etr_port1_online = 0;
}
}
@@ -452,17 +496,18 @@ static int __init etr_init(void)
if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
return 0;
+ time_init_wq();
/* Check if this machine has the steai instruction. */
if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
etr_steai_available = 1;
setup_timer(&etr_timer, etr_timeout, 0UL);
if (etr_port0_online) {
set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
- schedule_work(&etr_work);
+ queue_work(time_sync_wq, &etr_work);
}
if (etr_port1_online) {
set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
- schedule_work(&etr_work);
+ queue_work(time_sync_wq, &etr_work);
}
return 0;
}
@@ -489,7 +534,7 @@ void etr_switch_to_local(void)
if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
disable_sync_clock(NULL);
set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
- schedule_work(&etr_work);
+ queue_work(time_sync_wq, &etr_work);
}
/*
@@ -505,7 +550,7 @@ void etr_sync_check(void)
if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
disable_sync_clock(NULL);
set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
- schedule_work(&etr_work);
+ queue_work(time_sync_wq, &etr_work);
}
/*
@@ -529,13 +574,13 @@ static void etr_timing_alert(struct etr_irq_parm *intparm)
* Both ports are not up-to-date now.
*/
set_bit(ETR_EVENT_PORT_ALERT, &etr_events);
- schedule_work(&etr_work);
+ queue_work(time_sync_wq, &etr_work);
}
static void etr_timeout(unsigned long dummy)
{
set_bit(ETR_EVENT_UPDATE, &etr_events);
- schedule_work(&etr_work);
+ queue_work(time_sync_wq, &etr_work);
}
/*
@@ -642,14 +687,16 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
}
struct clock_sync_data {
+ atomic_t cpus;
int in_sync;
unsigned long long fixup_cc;
+ int etr_port;
+ struct etr_aib *etr_aib;
};
-static void clock_sync_cpu_start(void *dummy)
+static void clock_sync_cpu(struct clock_sync_data *sync)
{
- struct clock_sync_data *sync = dummy;
-
+ atomic_dec(&sync->cpus);
enable_sync_clock();
/*
* This looks like a busy wait loop but it isn't. etr_sync_cpus
@@ -675,39 +722,35 @@ static void clock_sync_cpu_start(void *dummy)
fixup_clock_comparator(sync->fixup_cc);
}
-static void clock_sync_cpu_end(void *dummy)
-{
-}
-
/*
* Sync the TOD clock using the port refered to by aibp. This port
* Sync the TOD clock using the port referred to by aibp. This port
* has to be enabled and the other port has to be disabled. The
* last eacr update has to be more than 1.6 seconds in the past.
*/
-static int etr_sync_clock(struct etr_aib *aib, int port)
+static int etr_sync_clock(void *data)
{
- struct etr_aib *sync_port;
- struct clock_sync_data etr_sync;
+ static int first;
unsigned long long clock, old_clock, delay, delta;
- int follows;
+ struct clock_sync_data *etr_sync;
+ struct etr_aib *sync_port, *aib;
+ int port;
int rc;
- /* Check if the current aib is adjacent to the sync port aib. */
- sync_port = (port == 0) ? &etr_port0 : &etr_port1;
- follows = etr_aib_follows(sync_port, aib, port);
- memcpy(sync_port, aib, sizeof(*aib));
- if (!follows)
- return -EAGAIN;
+ etr_sync = data;
- /*
- * Catch all other cpus and make them wait until we have
- * successfully synced the clock. smp_call_function will
- * return after all other cpus are in etr_sync_cpu_start.
- */
- memset(&etr_sync, 0, sizeof(etr_sync));
- preempt_disable();
- smp_call_function(clock_sync_cpu_start, &etr_sync, 0);
- local_irq_disable();
+ if (xchg(&first, 1) == 1) {
+ /* Slave */
+ clock_sync_cpu(etr_sync);
+ return 0;
+ }
+
+ /* Wait until all other cpus entered the sync function. */
+ while (atomic_read(&etr_sync->cpus) != 0)
+ cpu_relax();
+
+ port = etr_sync->etr_port;
+ aib = etr_sync->etr_aib;
+ sync_port = (port == 0) ? &etr_port0 : &etr_port1;
enable_sync_clock();
/* Set clock to next OTE. */
@@ -724,16 +767,16 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
delay = (unsigned long long)
(aib->edf2.etv - sync_port->edf2.etv) << 32;
delta = adjust_time(old_clock, clock, delay);
- etr_sync.fixup_cc = delta;
+ etr_sync->fixup_cc = delta;
fixup_clock_comparator(delta);
/* Verify that the clock is properly set. */
if (!etr_aib_follows(sync_port, aib, port)) {
/* Didn't work. */
disable_sync_clock(NULL);
- etr_sync.in_sync = -EAGAIN;
+ etr_sync->in_sync = -EAGAIN;
rc = -EAGAIN;
} else {
- etr_sync.in_sync = 1;
+ etr_sync->in_sync = 1;
rc = 0;
}
} else {
@@ -741,12 +784,33 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
__ctl_clear_bit(0, 29);
__ctl_clear_bit(14, 21);
disable_sync_clock(NULL);
- etr_sync.in_sync = -EAGAIN;
+ etr_sync->in_sync = -EAGAIN;
rc = -EAGAIN;
}
- local_irq_enable();
- smp_call_function(clock_sync_cpu_end, NULL, 0);
- preempt_enable();
+ xchg(&first, 0);
+ return rc;
+}
+
+static int etr_sync_clock_stop(struct etr_aib *aib, int port)
+{
+ struct clock_sync_data etr_sync;
+ struct etr_aib *sync_port;
+ int follows;
+ int rc;
+
+ /* Check if the current aib is adjacent to the sync port aib. */
+ sync_port = (port == 0) ? &etr_port0 : &etr_port1;
+ follows = etr_aib_follows(sync_port, aib, port);
+ memcpy(sync_port, aib, sizeof(*aib));
+ if (!follows)
+ return -EAGAIN;
+ memset(&etr_sync, 0, sizeof(etr_sync));
+ etr_sync.etr_aib = aib;
+ etr_sync.etr_port = port;
+ get_online_cpus();
+ atomic_set(&etr_sync.cpus, num_online_cpus() - 1);
+ rc = stop_machine(etr_sync_clock, &etr_sync, &cpu_online_map);
+ put_online_cpus();
return rc;
}
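
Editorial note: etr_sync_clock() now runs on every CPU under stop_machine(): the first CPU to win the xchg() on 'first' acts as the master, waits via the cpus counter for the others, adjusts the clock and then releases them through in_sync, while everyone else goes through clock_sync_cpu(). A rough pthread model of that rendezvous (three threads stand in for CPUs, busy waits kept for brevity):

#include <stdio.h>
#include <pthread.h>
#include <stdatomic.h>

static atomic_int first;
static atomic_int cpus;		/* CPUs still on their way in */
static atomic_int in_sync;

static void *sync_fn(void *arg)
{
	long cpu = (long)arg;

	if (atomic_exchange(&first, 1) == 1) {
		/* slave: check in, then wait for the master */
		atomic_fetch_sub(&cpus, 1);
		while (!atomic_load(&in_sync))
			;
		printf("cpu %ld: slave done\n", cpu);
		return NULL;
	}
	/* master: wait until all other CPUs entered the sync function */
	while (atomic_load(&cpus) != 0)
		;
	printf("cpu %ld: master adjusts the clock\n", cpu);
	atomic_store(&in_sync, 1);
	return NULL;
}

int main(void)
{
	pthread_t t[3];
	long i;

	atomic_store(&cpus, 2);		/* num_online_cpus() - 1 */
	for (i = 0; i < 3; i++)
		pthread_create(&t[i], NULL, sync_fn, (void *)i);
	for (i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}
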
@@ -903,7 +967,7 @@ static void etr_update_eacr(struct etr_eacr eacr)
}
/*
- * ETR tasklet. In this function you'll find the main logic. In
+ * ETR work. In this function you'll find the main logic. In
* particular this is the only function that calls etr_update_eacr(),
* it "controls" the etr control register.
*/
@@ -914,6 +978,9 @@ static void etr_work_fn(struct work_struct *work)
struct etr_aib aib;
int sync_port;
+ /* prevent multiple execution. */
+ mutex_lock(&etr_work_mutex);
+
/* Create working copy of etr_eacr. */
eacr = etr_eacr;
@@ -929,7 +996,7 @@ static void etr_work_fn(struct work_struct *work)
del_timer_sync(&etr_timer);
etr_update_eacr(eacr);
clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
- return;
+ goto out_unlock;
}
/* Store aib to get the current ETR status word. */
@@ -1016,7 +1083,7 @@ static void etr_work_fn(struct work_struct *work)
eacr.es || sync_port < 0) {
etr_update_eacr(eacr);
etr_set_tolec_timeout(now);
- return;
+ goto out_unlock;
}
/*
@@ -1036,7 +1103,7 @@ static void etr_work_fn(struct work_struct *work)
etr_update_eacr(eacr);
set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
if (now < etr_tolec + (1600000 << 12) ||
- etr_sync_clock(&aib, sync_port) != 0) {
+ etr_sync_clock_stop(&aib, sync_port) != 0) {
/* Sync failed. Try again in 1/2 second. */
eacr.es = 0;
etr_update_eacr(eacr);
@@ -1044,6 +1111,8 @@ static void etr_work_fn(struct work_struct *work)
etr_set_sync_timeout();
} else
etr_set_tolec_timeout(now);
+out_unlock:
+ mutex_unlock(&etr_work_mutex);
}
/*
@@ -1125,13 +1194,13 @@ static ssize_t etr_online_store(struct sys_device *dev,
return count; /* Nothing to do. */
etr_port0_online = value;
set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
- schedule_work(&etr_work);
+ queue_work(time_sync_wq, &etr_work);
} else {
if (etr_port1_online == value)
return count; /* Nothing to do. */
etr_port1_online = value;
set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
- schedule_work(&etr_work);
+ queue_work(time_sync_wq, &etr_work);
}
return count;
}
@@ -1332,6 +1401,7 @@ static struct stp_sstpi stp_info;
static void *stp_page;
static void stp_work_fn(struct work_struct *work);
+static DEFINE_MUTEX(stp_work_mutex);
static DECLARE_WORK(stp_work, stp_work_fn);
static int __init early_parse_stp(char *p)
@@ -1356,7 +1426,8 @@ static void __init stp_reset(void)
if (rc == 0)
set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
else if (stp_online) {
- printk(KERN_WARNING "Running on non STP capable machine.\n");
+ pr_warning("The real or virtual hardware system does "
+ "not provide an STP interface\n");
free_bootmem((unsigned long) stp_page, PAGE_SIZE);
stp_page = NULL;
stp_online = 0;
@@ -1365,8 +1436,12 @@ static void __init stp_reset(void)
static int __init stp_init(void)
{
- if (test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags) && stp_online)
- schedule_work(&stp_work);
+ if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
+ return 0;
+ time_init_wq();
+ if (!stp_online)
+ return 0;
+ queue_work(time_sync_wq, &stp_work);
return 0;
}
@@ -1383,7 +1458,7 @@ arch_initcall(stp_init);
static void stp_timing_alert(struct stp_irq_parm *intparm)
{
if (intparm->tsc || intparm->lac || intparm->tcpc)
- schedule_work(&stp_work);
+ queue_work(time_sync_wq, &stp_work);
}
/*
@@ -1397,7 +1472,7 @@ void stp_sync_check(void)
if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
return;
disable_sync_clock(NULL);
- schedule_work(&stp_work);
+ queue_work(time_sync_wq, &stp_work);
}
/*
@@ -1411,46 +1486,34 @@ void stp_island_check(void)
if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
return;
disable_sync_clock(NULL);
- schedule_work(&stp_work);
+ queue_work(time_sync_wq, &stp_work);
}
-/*
- * STP tasklet. Check for the STP state and take over the clock
- * synchronization if the STP clock source is usable.
- */
-static void stp_work_fn(struct work_struct *work)
+
+static int stp_sync_clock(void *data)
{
- struct clock_sync_data stp_sync;
+ static int first;
unsigned long long old_clock, delta;
+ struct clock_sync_data *stp_sync;
int rc;
- if (!stp_online) {
- chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
- return;
- }
+ stp_sync = data;
- rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0);
- if (rc)
- return;
+ if (xchg(&first, 1) == 1) {
+ /* Slave */
+ clock_sync_cpu(stp_sync);
+ return 0;
+ }
- rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
- if (rc || stp_info.c == 0)
- return;
+ /* Wait until all other cpus entered the sync function. */
+ while (atomic_read(&stp_sync->cpus) != 0)
+ cpu_relax();
- /*
- * Catch all other cpus and make them wait until we have
- * successfully synced the clock. smp_call_function will
- * return after all other cpus are in clock_sync_cpu_start.
- */
- memset(&stp_sync, 0, sizeof(stp_sync));
- preempt_disable();
- smp_call_function(clock_sync_cpu_start, &stp_sync, 0);
- local_irq_disable();
enable_sync_clock();
set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
- schedule_work(&etr_work);
+ queue_work(time_sync_wq, &etr_work);
rc = 0;
if (stp_info.todoff[0] || stp_info.todoff[1] ||
@@ -1469,16 +1532,49 @@ static void stp_work_fn(struct work_struct *work)
}
if (rc) {
disable_sync_clock(NULL);
- stp_sync.in_sync = -EAGAIN;
+ stp_sync->in_sync = -EAGAIN;
clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
if (etr_port0_online || etr_port1_online)
- schedule_work(&etr_work);
+ queue_work(time_sync_wq, &etr_work);
} else
- stp_sync.in_sync = 1;
+ stp_sync->in_sync = 1;
+ xchg(&first, 0);
+ return 0;
+}
+
+/*
+ * STP work. Check for the STP state and take over the clock
+ * synchronization if the STP clock source is usable.
+ */
+static void stp_work_fn(struct work_struct *work)
+{
+ struct clock_sync_data stp_sync;
+ int rc;
+
+ /* prevent multiple execution. */
+ mutex_lock(&stp_work_mutex);
+
+ if (!stp_online) {
+ chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
+ goto out_unlock;
+ }
+
+ rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0);
+ if (rc)
+ goto out_unlock;
+
+ rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
+ if (rc || stp_info.c == 0)
+ goto out_unlock;
+
+ memset(&stp_sync, 0, sizeof(stp_sync));
+ get_online_cpus();
+ atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
+ stop_machine(stp_sync_clock, &stp_sync, &cpu_online_map);
+ put_online_cpus();
- local_irq_enable();
- smp_call_function(clock_sync_cpu_end, NULL, 0);
- preempt_enable();
+out_unlock:
+ mutex_unlock(&stp_work_mutex);
}
/*
@@ -1587,7 +1683,7 @@ static ssize_t stp_online_store(struct sysdev_class *class,
if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
return -EOPNOTSUPP;
stp_online = value;
- schedule_work(&stp_work);
+ queue_work(time_sync_wq, &stp_work);
return count;
}
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index bf96f1b5c6ec..90e9ba11eba1 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -3,6 +3,9 @@
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
+#define KMSG_COMPONENT "cpu"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
@@ -12,6 +15,7 @@
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/smp.h>
+#include <linux/cpuset.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
#include <asm/sysinfo.h>
@@ -57,11 +61,11 @@ struct core_info {
cpumask_t mask;
};
+static int topology_enabled;
static void topology_work_fn(struct work_struct *work);
static struct tl_info *tl_info;
static struct core_info core_info;
static int machine_has_topology;
-static int machine_has_topology_irq;
static struct timer_list topology_timer;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
@@ -77,8 +81,8 @@ cpumask_t cpu_coregroup_map(unsigned int cpu)
cpumask_t mask;
cpus_clear(mask);
- if (!machine_has_topology)
- return cpu_present_map;
+ if (!topology_enabled || !machine_has_topology)
+ return cpu_possible_map;
spin_lock_irqsave(&topology_lock, flags);
while (core) {
if (cpu_isset(cpu, core->mask)) {
@@ -168,7 +172,7 @@ static void topology_update_polarization_simple(void)
int cpu;
mutex_lock(&smp_cpu_state_mutex);
- for_each_present_cpu(cpu)
+ for_each_possible_cpu(cpu)
smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
mutex_unlock(&smp_cpu_state_mutex);
}
@@ -199,7 +203,7 @@ int topology_set_cpu_management(int fc)
rc = ptf(PTF_HORIZONTAL);
if (rc)
return -EBUSY;
- for_each_present_cpu(cpu)
+ for_each_possible_cpu(cpu)
smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
return rc;
}
@@ -208,7 +212,7 @@ static void update_cpu_core_map(void)
{
int cpu;
- for_each_present_cpu(cpu)
+ for_each_possible_cpu(cpu)
cpu_core_map[cpu] = cpu_coregroup_map(cpu);
}
@@ -235,7 +239,7 @@ int arch_update_cpu_topology(void)
static void topology_work_fn(struct work_struct *work)
{
- arch_reinit_sched_domains();
+ rebuild_sched_domains();
}
void topology_schedule_update(void)
@@ -258,10 +262,14 @@ static void set_topology_timer(void)
add_timer(&topology_timer);
}
-static void topology_interrupt(__u16 code)
+static int __init early_parse_topology(char *p)
{
- schedule_work(&topology_work);
+ if (strncmp(p, "on", 2))
+ return 0;
+ topology_enabled = 1;
+ return 0;
}
+early_param("topology", early_parse_topology);
static int __init init_topology_update(void)
{
@@ -273,14 +281,7 @@ static int __init init_topology_update(void)
goto out;
}
init_timer_deferrable(&topology_timer);
- if (machine_has_topology_irq) {
- rc = register_external_interrupt(0x2005, topology_interrupt);
- if (rc)
- goto out;
- ctl_set_bit(0, 8);
- }
- else
- set_topology_timer();
+ set_topology_timer();
out:
update_cpu_core_map();
return rc;
@@ -301,9 +302,6 @@ void __init s390_init_cpu_topology(void)
return;
machine_has_topology = 1;
- if (facility_bits & (1ULL << 51))
- machine_has_topology_irq = 1;
-
tl_info = alloc_bootmem_pages(PAGE_SIZE);
info = tl_info;
stsi(info, 15, 1, 2);
@@ -312,7 +310,7 @@ void __init s390_init_cpu_topology(void)
for (i = 0; i < info->mnest - 2; i++)
nr_cores *= info->mag[NR_MAG - 3 - i];
- printk(KERN_INFO "CPU topology:");
+ pr_info("The CPU configuration topology of the machine is:");
for (i = 0; i < NR_MAG; i++)
printk(" %d", info->mag[i]);
printk(" / %d\n", info->mnest);
@@ -327,5 +325,4 @@ void __init s390_init_cpu_topology(void)
return;
error:
machine_has_topology = 0;
- machine_has_topology_irq = 0;
}
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
new file mode 100644
index 000000000000..10a6ccef4412
--- /dev/null
+++ b/arch/s390/kernel/vdso.c
@@ -0,0 +1,234 @@
+/*
+ * vdso setup for s390
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/elf.h>
+#include <linux/security.h>
+#include <linux/bootmem.h>
+
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+#include <asm/sections.h>
+#include <asm/vdso.h>
+
+/* Max supported size for symbol names */
+#define MAX_SYMNAME 64
+
+#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
+extern char vdso32_start, vdso32_end;
+static void *vdso32_kbase = &vdso32_start;
+static unsigned int vdso32_pages;
+static struct page **vdso32_pagelist;
+#endif
+
+#ifdef CONFIG_64BIT
+extern char vdso64_start, vdso64_end;
+static void *vdso64_kbase = &vdso64_start;
+static unsigned int vdso64_pages;
+static struct page **vdso64_pagelist;
+#endif /* CONFIG_64BIT */
+
+/*
+ * Should the kernel map a VDSO page into processes and pass its
+ * address down to glibc upon exec()?
+ */
+unsigned int __read_mostly vdso_enabled = 1;
+
+static int __init vdso_setup(char *s)
+{
+ vdso_enabled = simple_strtoul(s, NULL, 0);
+ return 1;
+}
+__setup("vdso=", vdso_setup);
+
+/*
+ * The vdso data page
+ */
+static union {
+ struct vdso_data data;
+ u8 page[PAGE_SIZE];
+} vdso_data_store __attribute__((__section__(".data.page_aligned")));
+struct vdso_data *vdso_data = &vdso_data_store.data;
+
+/*
+ * This is called from binfmt_elf, we create the special vma for the
+ * vDSO and insert it into the mm struct tree
+ */
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ struct page **vdso_pagelist;
+ unsigned long vdso_pages;
+ unsigned long vdso_base;
+ int rc;
+
+ if (!vdso_enabled)
+ return 0;
+ /*
+ * Only map the vdso for dynamically linked elf binaries.
+ */
+ if (!uses_interp)
+ return 0;
+
+ vdso_base = mm->mmap_base;
+#ifdef CONFIG_64BIT
+ vdso_pagelist = vdso64_pagelist;
+ vdso_pages = vdso64_pages;
+#ifdef CONFIG_COMPAT
+ if (test_thread_flag(TIF_31BIT)) {
+ vdso_pagelist = vdso32_pagelist;
+ vdso_pages = vdso32_pages;
+ }
+#endif
+#else
+ vdso_pagelist = vdso32_pagelist;
+ vdso_pages = vdso32_pages;
+#endif
+
+ /*
+ * vDSO has a problem and was disabled, just don't "enable" it for
+ * the process
+ */
+ if (vdso_pages == 0)
+ return 0;
+
+ current->mm->context.vdso_base = 0;
+
+ /*
+ * pick a base address for the vDSO in process space. We try to put
+ * it at vdso_base which is the "natural" base for it, but we might
+ * fail and end up putting it elsewhere.
+ */
+ down_write(&mm->mmap_sem);
+ vdso_base = get_unmapped_area(NULL, vdso_base,
+ vdso_pages << PAGE_SHIFT, 0, 0);
+ if (IS_ERR_VALUE(vdso_base)) {
+ rc = vdso_base;
+ goto out_up;
+ }
+
+ /*
+ * our vma flags don't have VM_WRITE so by default, the process
+ * isn't allowed to write those pages.
+ * gdb can break that with ptrace interface, and thus trigger COW
+ * on those pages but it's then your responsibility to never do that
+ * on the "data" page of the vDSO or you'll stop getting kernel
+ * updates and your nice userland gettimeofday will be totally dead.
+ * It's fine to use that for setting breakpoints in the vDSO code
+ * pages though
+ *
+ * Make sure the vDSO gets into every core dump.
+ * Dumping its contents makes post-mortem fully interpretable later
+ * without matching up the same kernel and hardware config to see
+ * what PC values meant.
+ */
+ rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+ VM_READ|VM_EXEC|
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
+ VM_ALWAYSDUMP,
+ vdso_pagelist);
+ if (rc)
+ goto out_up;
+
+ /* Put vDSO base into mm struct */
+ current->mm->context.vdso_base = vdso_base;
+
+ up_write(&mm->mmap_sem);
+ return 0;
+
+out_up:
+ up_write(&mm->mmap_sem);
+ return rc;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
+ return "[vdso]";
+ return NULL;
+}
+
+static int __init vdso_init(void)
+{
+ int i;
+
+#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
+ /* Calculate the size of the 32 bit vDSO */
+ vdso32_pages = ((&vdso32_end - &vdso32_start
+ + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
+
+ /* Make sure pages are in the correct state */
+ vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
+ GFP_KERNEL);
+ BUG_ON(vdso32_pagelist == NULL);
+ for (i = 0; i < vdso32_pages - 1; i++) {
+ struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
+ ClearPageReserved(pg);
+ get_page(pg);
+ vdso32_pagelist[i] = pg;
+ }
+ vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
+ vdso32_pagelist[vdso32_pages] = NULL;
+#endif
+
+#ifdef CONFIG_64BIT
+ /* Calculate the size of the 64 bit vDSO */
+ vdso64_pages = ((&vdso64_end - &vdso64_start
+ + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
+
+ /* Make sure pages are in the correct state */
+ vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
+ GFP_KERNEL);
+ BUG_ON(vdso64_pagelist == NULL);
+ for (i = 0; i < vdso64_pages - 1; i++) {
+ struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
+ ClearPageReserved(pg);
+ get_page(pg);
+ vdso64_pagelist[i] = pg;
+ }
+ vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
+ vdso64_pagelist[vdso64_pages] = NULL;
+#endif /* CONFIG_64BIT */
+
+ get_page(virt_to_page(vdso_data));
+
+ smp_wmb();
+
+ return 0;
+}
+arch_initcall(vdso_init);
+
+int in_gate_area_no_task(unsigned long addr)
+{
+ return 0;
+}
+
+int in_gate_area(struct task_struct *task, unsigned long addr)
+{
+ return 0;
+}
+
+struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+{
+ return NULL;
+}
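
Editorial note: the per-process mapping created by arch_setup_additional_pages() above is what shows up as [vdso] in /proc/self/maps (via arch_vma_name()) and is advertised to the ELF loader through the AT_SYSINFO_EHDR aux vector entry. A small userspace check of the latter; getauxval() is a later glibc convenience, not part of this patch:

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (vdso)
		printf("vDSO mapped at %#lx\n", vdso);
	else
		printf("no vDSO mapped (vdso=0 or old kernel)\n");
	return 0;
}
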
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
new file mode 100644
index 000000000000..ca78ad60ba24
--- /dev/null
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -0,0 +1,55 @@
+# List of files in the vdso, has to be asm only for now
+
+obj-vdso32 = gettimeofday.o clock_getres.o clock_gettime.o note.o
+
+# Build rules
+
+targets := $(obj-vdso32) vdso32.so vdso32.so.dbg
+obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
+
+KBUILD_AFLAGS_31 := $(filter-out -m64,$(KBUILD_AFLAGS))
+KBUILD_AFLAGS_31 += -m31 -s
+
+KBUILD_CFLAGS_31 := $(filter-out -m64,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS_31 += -m31 -fPIC -shared -fno-common -fno-builtin
+KBUILD_CFLAGS_31 += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
+ $(call ld-option, -Wl$(comma)--hash-style=sysv)
+
+$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_31)
+$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_31)
+
+obj-y += vdso32_wrapper.o
+extra-y += vdso32.lds
+CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
+
+# Force dependency (incbin is bad)
+$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
+
+# link rule for the .so file, .lds has to be first
+$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32)
+ $(call if_changed,vdso32ld)
+
+# strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+# assembly rules for the .S files
+$(obj-vdso32): %.o: %.S
+ $(call if_changed_dep,vdso32as)
+
+# actual build commands
+quiet_cmd_vdso32ld = VDSO32L $@
+ cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
+quiet_cmd_vdso32as = VDSO32A $@
+ cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $<
+
+# install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL $@
+ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+
+vdso32.so: $(obj)/vdso32.so.dbg
+ @mkdir -p $(MODLIB)/vdso
+ $(call cmd,vdso_install)
+
+vdso_install: vdso32.so
diff --git a/arch/s390/kernel/vdso32/clock_getres.S b/arch/s390/kernel/vdso32/clock_getres.S
new file mode 100644
index 000000000000..9532c4e6a9d2
--- /dev/null
+++ b/arch/s390/kernel/vdso32/clock_getres.S
@@ -0,0 +1,39 @@
+/*
+ * Userland implementation of clock_getres() for 32-bit processes in an
+ * s390 kernel for use in the vDSO
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ */
+#include <asm/vdso.h>
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+
+ .text
+ .align 4
+ .globl __kernel_clock_getres
+ .type __kernel_clock_getres,@function
+__kernel_clock_getres:
+ .cfi_startproc
+ chi %r2,CLOCK_REALTIME
+ je 0f
+ chi %r2,CLOCK_MONOTONIC
+ jne 3f
+0: ltr %r3,%r3
+ jz 2f /* res == NULL */
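+	/* address the literal at 4: PC-relative; the vDSO is position independent */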
+ basr %r1,0
+1: l %r0,4f-1b(%r1)
+ xc 0(4,%r3),0(%r3) /* set tp->tv_sec to zero */
+ st %r0,4(%r3) /* store tp->tv_usec */
+2: lhi %r2,0
+ br %r14
+3: lhi %r1,__NR_clock_getres /* fallback to svc */
+ svc 0
+ br %r14
+4: .long CLOCK_REALTIME_RES
+ .cfi_endproc
+ .size __kernel_clock_getres,.-__kernel_clock_getres
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
new file mode 100644
index 000000000000..4a98909a8310
--- /dev/null
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -0,0 +1,128 @@
+/*
+ * Userland implementation of clock_gettime() for 32-bit processes in an
+ * s390 kernel for use in the vDSO
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ */
+#include <asm/vdso.h>
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+
+ .text
+ .align 4
+ .globl __kernel_clock_gettime
+ .type __kernel_clock_gettime,@function
+__kernel_clock_gettime:
+ .cfi_startproc
+ basr %r5,0
+0: al %r5,21f-0b(%r5) /* get &_vdso_data */
+ chi %r2,CLOCK_REALTIME
+ je 10f
+ chi %r2,CLOCK_MONOTONIC
+ jne 19f
+
+ /* CLOCK_MONOTONIC */
+ ltr %r3,%r3
+ jz 9f /* tp == NULL */
+1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
+ tml %r4,0x0001 /* pending update ? loop */
+ jnz 1b
+ stck 24(%r15) /* Store TOD clock */
+ lm %r0,%r1,24(%r15)
+ s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+ sl %r1,__VDSO_XTIME_STAMP+4(%r5)
+ brc 3,2f
+ ahi %r0,-1
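+	/*
+	 * Convert the TOD clock delta to nanoseconds: the TOD clock counts
+	 * in units of 2^-12 microseconds, so multiply the 64-bit delta by
+	 * 1000 (done in two 32-bit halves) and shift the result right by 12.
+	 */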
+2: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */
+ lr %r2,%r0
+ lhi %r0,1000
+ ltr %r1,%r1
+ mr %r0,%r0
+ jnm 3f
+ ahi %r0,1000
+3: alr %r0,%r2
+ srdl %r0,12
+ al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
+ al %r1,__VDSO_XTIME_NSEC+4(%r5)
+ brc 12,4f
+ ahi %r0,1
+4: l %r2,__VDSO_XTIME_SEC+4(%r5)
+ al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */
+ al %r1,__VDSO_WTOM_NSEC+4(%r5)
+ brc 12,5f
+ ahi %r0,1
+5: al %r2,__VDSO_WTOM_SEC+4(%r5)
+ cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
+ jne 1b
+ basr %r5,0
+6: ltr %r0,%r0
+ jnz 7f
+ cl %r1,20f-6b(%r5)
+ jl 8f
+7: ahi %r2,1
+ sl %r1,20f-6b(%r5)
+ brc 3,6b
+ ahi %r0,-1
+ j 6b
+8: st %r2,0(%r3) /* store tp->tv_sec */
+ st %r1,4(%r3) /* store tp->tv_nsec */
+9: lhi %r2,0
+ br %r14
+
+ /* CLOCK_REALTIME */
+10: ltr %r3,%r3 /* tp == NULL */
+ jz 18f
+11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
+ tml %r4,0x0001 /* pending update ? loop */
+ jnz 11b
+ stck 24(%r15) /* Store TOD clock */
+ lm %r0,%r1,24(%r15)
+ s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+ sl %r1,__VDSO_XTIME_STAMP+4(%r5)
+ brc 3,12f
+ ahi %r0,-1
+12: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */
+ lr %r2,%r0
+ lhi %r0,1000
+ ltr %r1,%r1
+ mr %r0,%r0
+ jnm 13f
+ ahi %r0,1000
+13: alr %r0,%r2
+ srdl %r0,12
+ al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
+ al %r1,__VDSO_XTIME_NSEC+4(%r5)
+ brc 12,14f
+ ahi %r0,1
+14: l %r2,__VDSO_XTIME_SEC+4(%r5)
+ cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
+ jne 11b
+ basr %r5,0
+15: ltr %r0,%r0
+ jnz 16f
+ cl %r1,20f-15b(%r5)
+ jl 17f
+16: ahi %r2,1
+ sl %r1,20f-15b(%r5)
+ brc 3,15b
+ ahi %r0,-1
+ j 15b
+17: st %r2,0(%r3) /* store tp->tv_sec */
+ st %r1,4(%r3) /* store tp->tv_nsec */
+18: lhi %r2,0
+ br %r14
+
+ /* Fallback to system call */
+19: lhi %r1,__NR_clock_gettime
+ svc 0
+ br %r14
+
+20: .long 1000000000
+21: .long _vdso_data - 0b
+ .cfi_endproc
+ .size __kernel_clock_gettime,.-__kernel_clock_gettime
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
new file mode 100644
index 000000000000..c32f29c3d70c
--- /dev/null
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -0,0 +1,82 @@
+/*
+ * Userland implementation of gettimeofday() for 32-bit processes in an
+ * s390 kernel for use in the vDSO
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ */
+#include <asm/vdso.h>
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+
+ .text
+ .align 4
+ .globl __kernel_gettimeofday
+ .type __kernel_gettimeofday,@function
+__kernel_gettimeofday:
+ .cfi_startproc
+ basr %r5,0
+0: al %r5,13f-0b(%r5) /* get &_vdso_data */
+1: ltr %r3,%r3 /* check if tz is NULL */
+ je 2f
+ mvc 0(8,%r3),__VDSO_TIMEZONE(%r5)
+2: ltr %r2,%r2 /* check if tv is NULL */
+ je 10f
+ l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
+ tml %r4,0x0001 /* pending update ? loop */
+ jnz 1b
+ stck 24(%r15) /* Store TOD clock */
+ lm %r0,%r1,24(%r15)
+ s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+ sl %r1,__VDSO_XTIME_STAMP+4(%r5)
+ brc 3,3f
+ ahi %r0,-1
+3: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */
+ st %r0,24(%r15)
+ lhi %r0,1000
+ ltr %r1,%r1
+ mr %r0,%r0
+ jnm 4f
+ ahi %r0,1000
+4: al %r0,24(%r15)
+ srdl %r0,12
+ al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
+ al %r1,__VDSO_XTIME_NSEC+4(%r5)
+ brc 12,5f
+ ahi %r0,1
+5: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5)
+ cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
+ jne 1b
+ l %r4,24(%r15) /* get tv_sec from stack */
+ basr %r5,0
+6: ltr %r0,%r0
+ jnz 7f
+ cl %r1,11f-6b(%r5)
+ jl 8f
+7: ahi %r4,1
+ sl %r1,11f-6b(%r5)
+ brc 3,6b
+ ahi %r0,-1
+ j 6b
+8: st %r4,0(%r2) /* store tv->tv_sec */
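+	/*
+	 * Convert nanoseconds to microseconds without a divide:
+	 * tv_usec = (tv_nsec * 274877907) >> 38, with 274877907 ~ 2^38 / 1000.
+	 * The multiply leaves the upper 32 bits in %r0, so only an
+	 * additional shift by 6 is needed.
+	 */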
+ ltr %r1,%r1
+ m %r0,12f-6b(%r5)
+ jnm 9f
+ al %r0,12f-6b(%r5)
+9: srl %r0,6
+ st %r0,4(%r2) /* store tv->tv_usec */
+10: slr %r2,%r2
+ br %r14
+11: .long 1000000000
+12: .long 274877907
+13: .long _vdso_data - 0b
+ .cfi_endproc
+ .size __kernel_gettimeofday,.-__kernel_gettimeofday
diff --git a/arch/s390/kernel/vdso32/note.S b/arch/s390/kernel/vdso32/note.S
new file mode 100644
index 000000000000..79a071e4357e
--- /dev/null
+++ b/arch/s390/kernel/vdso32/note.S
@@ -0,0 +1,12 @@
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+#include <linux/elfnote.h>
+
+ELFNOTE_START(Linux, 0, "a")
+ .long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/s390/kernel/vdso32/vdso32.lds.S b/arch/s390/kernel/vdso32/vdso32.lds.S
new file mode 100644
index 000000000000..a8c379fa1247
--- /dev/null
+++ b/arch/s390/kernel/vdso32/vdso32.lds.S
@@ -0,0 +1,138 @@
+/*
+ * This is the infamous ld script for the 32-bit vdso
+ * library
+ */
+#include <asm/vdso.h>
+
+OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
+OUTPUT_ARCH(s390:31-bit)
+ENTRY(_start)
+
+SECTIONS
+{
+ . = VDSO32_LBASE + SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ .note : { *(.note.*) } :text :note
+
+ . = ALIGN(16);
+ .text : {
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ } :text
+ PROVIDE(__etext = .);
+ PROVIDE(_etext = .);
+ PROVIDE(etext = .);
+
+ /*
+ * Other stuff is appended to the text segment:
+ */
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
+ .eh_frame : { KEEP (*(.eh_frame)) } :text
+ .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) }
+
+ .rela.dyn ALIGN(8) : { *(.rela.dyn) }
+ .got ALIGN(8) : { *(.got .toc) }
+
+ _end = .;
+ PROVIDE(end = .);
+
+ /*
+ * Stabs debugging sections are here too.
+ */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+
+ /*
+ * DWARF debug sections.
+ * Symbols in the DWARF debugging sections are relative to the
+ * beginning of the section so we begin them at 0.
+ */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+
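+	/*
+	 * The kernel maps the shared vdso_data page directly behind the vDSO
+	 * image, so _vdso_data is the first page-aligned address after the code.
+	 */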
+ . = ALIGN(4096);
+ PROVIDE(_vdso_data = .);
+
+ /DISCARD/ : {
+ *(.note.GNU-stack)
+ *(.branch_lt)
+ *(.data .data.* .gnu.linkonce.d.* .sdata*)
+ *(.bss .sbss .dynbss .dynsbss)
+ }
+}
+
+/*
+ * Very old versions of ld do not recognize this name token; use the constant.
+ */
+#define PT_GNU_EH_FRAME 0x6474e550
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+ text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+ eh_frame_hdr PT_GNU_EH_FRAME;
+}
+
+/*
+ * This controls what symbols we export from the DSO.
+ */
+VERSION
+{
+ VDSO_VERSION_STRING {
+ global:
+ /*
+ * Has to be there for the kernel to find
+ */
+ __kernel_gettimeofday;
+ __kernel_clock_gettime;
+ __kernel_clock_getres;
+
+ local: *;
+ };
+}
diff --git a/arch/s390/kernel/vdso32/vdso32_wrapper.S b/arch/s390/kernel/vdso32/vdso32_wrapper.S
new file mode 100644
index 000000000000..61639a89e70b
--- /dev/null
+++ b/arch/s390/kernel/vdso32/vdso32_wrapper.S
@@ -0,0 +1,13 @@
+#include <linux/init.h>
+#include <asm/page.h>
+
+ .section ".data.page_aligned"
+
+ .globl vdso32_start, vdso32_end
+ .balign PAGE_SIZE
+vdso32_start:
+ .incbin "arch/s390/kernel/vdso32/vdso32.so"
+ .balign PAGE_SIZE
+vdso32_end:
+
+ .previous
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
new file mode 100644
index 000000000000..6fc8e829258c
--- /dev/null
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -0,0 +1,55 @@
+# List of files in the vdso, has to be asm only for now
+
+obj-vdso64 = gettimeofday.o clock_getres.o clock_gettime.o note.o
+
+# Build rules
+
+targets := $(obj-vdso64) vdso64.so vdso64.so.dbg
+obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
+
+KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS))
+KBUILD_AFLAGS_64 += -m64 -s
+
+KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin
+KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
+ $(call ld-option, -Wl$(comma)--hash-style=sysv)
+
+$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
+$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64)
+
+obj-y += vdso64_wrapper.o
+extra-y += vdso64.lds
+CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
+
+# Force dependency (incbin is bad)
+$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
+
+# link rule for the .so file, .lds has to be first
+$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
+ $(call if_changed,vdso64ld)
+
+# strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+# assembly rules for the .S files
+$(obj-vdso64): %.o: %.S
+ $(call if_changed_dep,vdso64as)
+
+# actual build commands
+quiet_cmd_vdso64ld = VDSO64L $@
+ cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
+quiet_cmd_vdso64as = VDSO64A $@
+ cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
+
+# install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL $@
+ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+
+vdso64.so: $(obj)/vdso64.so.dbg
+ @mkdir -p $(MODLIB)/vdso
+ $(call cmd,vdso_install)
+
+vdso_install: vdso64.so
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
new file mode 100644
index 000000000000..488e31a3c0e7
--- /dev/null
+++ b/arch/s390/kernel/vdso64/clock_getres.S
@@ -0,0 +1,39 @@
+/*
+ * Userland implementation of clock_getres() for 64-bit processes in an
+ * s390 kernel for use in the vDSO
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ */
+#include <asm/vdso.h>
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+
+ .text
+ .align 4
+ .globl __kernel_clock_getres
+ .type __kernel_clock_getres,@function
+__kernel_clock_getres:
+ .cfi_startproc
+ cghi %r2,CLOCK_REALTIME
+ je 0f
+ cghi %r2,CLOCK_MONOTONIC
+ jne 2f
+0: ltgr %r3,%r3
+ jz 1f /* res == NULL */
+ larl %r1,3f
+ lg %r0,0(%r1)
+ xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */
+ stg %r0,8(%r3) /* store tp->tv_usec */
+1: lghi %r2,0
+ br %r14
+2: lghi %r1,__NR_clock_getres /* fallback to svc */
+ svc 0
+ br %r14
+3: .quad CLOCK_REALTIME_RES
+ .cfi_endproc
+ .size __kernel_clock_getres,.-__kernel_clock_getres
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
new file mode 100644
index 000000000000..738a410b7eb2
--- /dev/null
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -0,0 +1,89 @@
+/*
+ * Userland implementation of clock_gettime() for 64-bit processes in an
+ * s390 kernel for use in the vDSO
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ */
+#include <asm/vdso.h>
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+
+ .text
+ .align 4
+ .globl __kernel_clock_gettime
+ .type __kernel_clock_gettime,@function
+__kernel_clock_gettime:
+ .cfi_startproc
+ larl %r5,_vdso_data
+ cghi %r2,CLOCK_REALTIME
+ je 4f
+ cghi %r2,CLOCK_MONOTONIC
+ jne 9f
+
+ /* CLOCK_MONOTONIC */
+ ltgr %r3,%r3
+ jz 3f /* tp == NULL */
+0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
+ tmll %r4,0x0001 /* pending update ? loop */
+ jnz 0b
+ stck 48(%r15) /* Store TOD clock */
+ lg %r1,48(%r15)
+ sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+ mghi %r1,1000
+ srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
+ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
+ lg %r0,__VDSO_XTIME_SEC(%r5)
+ alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */
+ alg %r0,__VDSO_WTOM_SEC(%r5)
+ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
+ jne 0b
+ larl %r5,10f
+1: clg %r1,0(%r5)
+ jl 2f
+ slg %r1,0(%r5)
+ aghi %r0,1
+ j 1b
+2: stg %r0,0(%r3) /* store tp->tv_sec */
+ stg %r1,8(%r3) /* store tp->tv_nsec */
+3: lghi %r2,0
+ br %r14
+
+ /* CLOCK_REALTIME */
+4: ltr %r3,%r3 /* tp == NULL */
+ jz 8f
+5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
+ tmll %r4,0x0001 /* pending update ? loop */
+ jnz 5b
+ stck 48(%r15) /* Store TOD clock */
+ lg %r1,48(%r15)
+ sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+ mghi %r1,1000
+ srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
+ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
+ lg %r0,__VDSO_XTIME_SEC(%r5)
+ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
+ jne 5b
+ larl %r5,10f
+6: clg %r1,0(%r5)
+ jl 7f
+ slg %r1,0(%r5)
+ aghi %r0,1
+ j 6b
+7: stg %r0,0(%r3) /* store tp->tv_sec */
+ stg %r1,8(%r3) /* store tp->tv_nsec */
+8: lghi %r2,0
+ br %r14
+
+ /* Fallback to system call */
+9: lghi %r1,__NR_clock_gettime
+ svc 0
+ br %r14
+
+10: .quad 1000000000
+ .cfi_endproc
+ .size __kernel_clock_gettime,.-__kernel_clock_gettime
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
new file mode 100644
index 000000000000..f873e75634e1
--- /dev/null
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -0,0 +1,56 @@
+/*
+ * Userland implementation of gettimeofday() for 64-bit processes in an
+ * s390 kernel for use in the vDSO
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ */
+#include <asm/vdso.h>
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+
+ .text
+ .align 4
+ .globl __kernel_gettimeofday
+ .type __kernel_gettimeofday,@function
+__kernel_gettimeofday:
+ .cfi_startproc
+ larl %r5,_vdso_data
+0: ltgr %r3,%r3 /* check if tz is NULL */
+ je 1f
+ mvc 0(8,%r3),__VDSO_TIMEZONE(%r5)
+1: ltgr %r2,%r2 /* check if tv is NULL */
+ je 4f
+ lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
+ tmll %r4,0x0001 /* pending update ? loop */
+ jnz 0b
+ stck 48(%r15) /* Store TOD clock */
+ lg %r1,48(%r15)
+ sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
+ mghi %r1,1000
+ srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
+ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */
+ lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */
+ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
+ jne 0b
+ larl %r5,5f
+2: clg %r1,0(%r5)
+ jl 3f
+ slg %r1,0(%r5)
+ aghi %r0,1
+ j 2b
+3: stg %r0,0(%r2) /* store tv->tv_sec */
+ slgr %r0,%r0 /* tv_nsec -> tv_usec */
+ ml %r0,8(%r5)
+ srlg %r0,%r0,6
+ stg %r0,8(%r2) /* store tv->tv_usec */
+4: lghi %r2,0
+ br %r14
+5: .quad 1000000000
+ .long 274877907
+ .cfi_endproc
+ .size __kernel_gettimeofday,.-__kernel_gettimeofday
diff --git a/arch/s390/kernel/vdso64/note.S b/arch/s390/kernel/vdso64/note.S
new file mode 100644
index 000000000000..79a071e4357e
--- /dev/null
+++ b/arch/s390/kernel/vdso64/note.S
@@ -0,0 +1,12 @@
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+#include <linux/elfnote.h>
+
+ELFNOTE_START(Linux, 0, "a")
+ .long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/s390/kernel/vdso64/vdso64.lds.S b/arch/s390/kernel/vdso64/vdso64.lds.S
new file mode 100644
index 000000000000..9f5979d102a9
--- /dev/null
+++ b/arch/s390/kernel/vdso64/vdso64.lds.S
@@ -0,0 +1,138 @@
+/*
+ * This is the infamous ld script for the 64-bit vdso
+ * library
+ */
+#include <asm/vdso.h>
+
+OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
+OUTPUT_ARCH(s390:64-bit)
+ENTRY(_start)
+
+SECTIONS
+{
+ . = VDSO64_LBASE + SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ .note : { *(.note.*) } :text :note
+
+ . = ALIGN(16);
+ .text : {
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ } :text
+ PROVIDE(__etext = .);
+ PROVIDE(_etext = .);
+ PROVIDE(etext = .);
+
+ /*
+ * Other stuff is appended to the text segment:
+ */
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
+ .eh_frame : { KEEP (*(.eh_frame)) } :text
+ .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) }
+
+ .rela.dyn ALIGN(8) : { *(.rela.dyn) }
+ .got ALIGN(8) : { *(.got .toc) }
+
+ _end = .;
+ PROVIDE(end = .);
+
+ /*
+ * Stabs debugging sections are here too.
+ */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+
+ /*
+ * DWARF debug sections.
+ * Symbols in the DWARF debugging sections are relative to the
+ * beginning of the section so we begin them at 0.
+ */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+
+ . = ALIGN(4096);
+ PROVIDE(_vdso_data = .);
+
+ /DISCARD/ : {
+ *(.note.GNU-stack)
+ *(.branch_lt)
+ *(.data .data.* .gnu.linkonce.d.* .sdata*)
+ *(.bss .sbss .dynbss .dynsbss)
+ }
+}
+
+/*
+ * Very old versions of ld do not recognize this name token; use the constant.
+ */
+#define PT_GNU_EH_FRAME 0x6474e550
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+ text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+ eh_frame_hdr PT_GNU_EH_FRAME;
+}
+
+/*
+ * This controls what symbols we export from the DSO.
+ */
+VERSION
+{
+ VDSO_VERSION_STRING {
+ global:
+ /*
+ * Has to be there for the kernel to find
+ */
+ __kernel_gettimeofday;
+ __kernel_clock_gettime;
+ __kernel_clock_getres;
+
+ local: *;
+ };
+}
diff --git a/arch/s390/kernel/vdso64/vdso64_wrapper.S b/arch/s390/kernel/vdso64/vdso64_wrapper.S
new file mode 100644
index 000000000000..d8e2ac14d564
--- /dev/null
+++ b/arch/s390/kernel/vdso64/vdso64_wrapper.S
@@ -0,0 +1,13 @@
+#include <linux/init.h>
+#include <asm/page.h>
+
+ .section ".data.page_aligned"
+
+ .globl vdso64_start, vdso64_end
+ .balign PAGE_SIZE
+vdso64_start:
+ .incbin "arch/s390/kernel/vdso64/vdso64.so"
+ .balign PAGE_SIZE
+vdso64_end:
+
+ .previous
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 0fa5dc5d68e1..75a6e62ea973 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -27,7 +27,6 @@
static ext_int_info_t ext_int_info_timer;
static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
* Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock.
@@ -125,16 +124,6 @@ static inline void set_vtimer(__u64 expires)
/* store expire time for this CPU timer */
__get_cpu_var(virt_cpu_timer).to_expire = expires;
}
-#else
-static inline void set_vtimer(__u64 expires)
-{
- S390_lowcore.last_update_timer = expires;
- asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
-
- /* store expire time for this CPU timer */
- __get_cpu_var(virt_cpu_timer).to_expire = expires;
-}
-#endif
void vtime_start_cpu_timer(void)
{
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 580fc64cc735..5c8457129603 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -7,6 +7,9 @@
* (C) IBM Corporation 2002-2004
*/
+#define KMSG_COMPONENT "extmem"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/spinlock.h>
@@ -24,19 +27,6 @@
#include <asm/cpcmd.h>
#include <asm/setup.h>
-#define DCSS_DEBUG /* Debug messages on/off */
-
-#define DCSS_NAME "extmem"
-#ifdef DCSS_DEBUG
-#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSS_NAME " debug:" x)
-#else
-#define PRINT_DEBUG(x...) do {} while (0)
-#endif
-#define PRINT_INFO(x...) printk(KERN_INFO DCSS_NAME " info:" x)
-#define PRINT_WARN(x...) printk(KERN_WARNING DCSS_NAME " warning:" x)
-#define PRINT_ERR(x...) printk(KERN_ERR DCSS_NAME " error:" x)
-
-
#define DCSS_LOADSHR 0x00
#define DCSS_LOADNSR 0x04
#define DCSS_PURGESEG 0x08
@@ -286,7 +276,7 @@ query_segment_type (struct dcss_segment *seg)
goto out_free;
}
if (diag_cc > 1) {
- PRINT_WARN ("segment_type: diag returned error %ld\n", vmrc);
+ pr_warning("Querying a DCSS type failed with rc=%ld\n", vmrc);
rc = dcss_diag_translate_rc (vmrc);
goto out_free;
}
@@ -368,7 +358,6 @@ query_segment_type (struct dcss_segment *seg)
* -EIO : could not perform query diagnose
* -ENOENT : no such segment
* -ENOTSUPP: multi-part segment cannot be used with linux
- * -ENOSPC : segment cannot be used (overlaps with storage)
* -ENOMEM : out of memory
* 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h
*/
@@ -480,9 +469,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
goto out_resource;
}
if (diag_cc > 1) {
- PRINT_WARN ("segment_load: could not load segment %s - "
- "diag returned error (%ld)\n",
- name, end_addr);
+ pr_warning("Loading DCSS %s failed with rc=%ld\n", name,
+ end_addr);
rc = dcss_diag_translate_rc(end_addr);
dcss_diag(&purgeseg_scode, seg->dcss_name,
&dummy, &dummy);
@@ -496,15 +484,13 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
*addr = seg->start_addr;
*end = seg->end;
if (do_nonshared)
- PRINT_INFO ("segment_load: loaded segment %s range %p .. %p "
- "type %s in non-shared mode\n", name,
- (void*)seg->start_addr, (void*)seg->end,
- segtype_string[seg->vm_segtype]);
+ pr_info("DCSS %s of range %p to %p and type %s loaded as "
+ "exclusive-writable\n", name, (void*) seg->start_addr,
+ (void*) seg->end, segtype_string[seg->vm_segtype]);
else {
- PRINT_INFO ("segment_load: loaded segment %s range %p .. %p "
- "type %s in shared mode\n", name,
- (void*)seg->start_addr, (void*)seg->end,
- segtype_string[seg->vm_segtype]);
+ pr_info("DCSS %s of range %p to %p and type %s loaded in "
+ "shared access mode\n", name, (void*) seg->start_addr,
+ (void*) seg->end, segtype_string[seg->vm_segtype]);
}
goto out;
out_resource:
@@ -593,14 +579,14 @@ segment_modify_shared (char *name, int do_nonshared)
goto out_unlock;
}
if (do_nonshared == seg->do_nonshared) {
- PRINT_INFO ("segment_modify_shared: not reloading segment %s"
- " - already in requested mode\n",name);
+ pr_info("DCSS %s is already in the requested access "
+ "mode\n", name);
rc = 0;
goto out_unlock;
}
if (atomic_read (&seg->ref_count) != 1) {
- PRINT_WARN ("segment_modify_shared: not reloading segment %s - "
- "segment is in use by other driver(s)\n",name);
+ pr_warning("DCSS %s is in use and cannot be reloaded\n",
+ name);
rc = -EAGAIN;
goto out_unlock;
}
@@ -613,8 +599,8 @@ segment_modify_shared (char *name, int do_nonshared)
seg->res->flags |= IORESOURCE_READONLY;
if (request_resource(&iomem_resource, seg->res)) {
- PRINT_WARN("segment_modify_shared: could not reload segment %s"
- " - overlapping resources\n", name);
+ pr_warning("DCSS %s overlaps with used memory resources "
+ "and cannot be reloaded\n", name);
rc = -EBUSY;
kfree(seg->res);
goto out_del_mem;
@@ -632,9 +618,8 @@ segment_modify_shared (char *name, int do_nonshared)
goto out_del_res;
}
if (diag_cc > 1) {
- PRINT_WARN ("segment_modify_shared: could not reload segment %s"
- " - diag returned error (%ld)\n",
- name, end_addr);
+ pr_warning("Reloading DCSS %s failed with rc=%ld\n", name,
+ end_addr);
rc = dcss_diag_translate_rc(end_addr);
goto out_del_res;
}
@@ -673,8 +658,7 @@ segment_unload(char *name)
mutex_lock(&dcss_lock);
seg = segment_by_name (name);
if (seg == NULL) {
- PRINT_ERR ("could not find segment %s in segment_unload, "
- "please report to linux390@de.ibm.com\n",name);
+ pr_err("Unloading unknown DCSS %s failed\n", name);
goto out_unlock;
}
if (atomic_dec_return(&seg->ref_count) != 0)
@@ -709,8 +693,7 @@ segment_save(char *name)
seg = segment_by_name (name);
if (seg == NULL) {
- PRINT_ERR("could not find segment %s in segment_save, please "
- "report to linux390@de.ibm.com\n", name);
+ pr_err("Saving unknown DCSS %s failed\n", name);
goto out;
}
@@ -727,14 +710,14 @@ segment_save(char *name)
response = 0;
cpcmd(cmd1, NULL, 0, &response);
if (response) {
- PRINT_ERR("segment_save: DEFSEG failed with response code %i\n",
- response);
+ pr_err("Saving a DCSS failed with DEFSEG response code "
+ "%i\n", response);
goto out;
}
cpcmd(cmd2, NULL, 0, &response);
if (response) {
- PRINT_ERR("segment_save: SAVESEG failed with response code %i\n",
- response);
+ pr_err("Saving a DCSS failed with SAVESEG response code "
+ "%i\n", response);
goto out;
}
out:
@@ -749,44 +732,41 @@ void segment_warning(int rc, char *seg_name)
{
switch (rc) {
case -ENOENT:
- PRINT_WARN("cannot load/query segment %s, "
- "does not exist\n", seg_name);
+ pr_err("DCSS %s cannot be loaded or queried\n", seg_name);
break;
case -ENOSYS:
- PRINT_WARN("cannot load/query segment %s, "
- "not running on VM\n", seg_name);
+ pr_err("DCSS %s cannot be loaded or queried without "
+ "z/VM\n", seg_name);
break;
case -EIO:
- PRINT_WARN("cannot load/query segment %s, "
- "hardware error\n", seg_name);
+ pr_err("Loading or querying DCSS %s resulted in a "
+ "hardware error\n", seg_name);
break;
case -ENOTSUPP:
- PRINT_WARN("cannot load/query segment %s, "
- "is a multi-part segment\n", seg_name);
+ pr_err("DCSS %s has multiple page ranges and cannot be "
+ "loaded or queried\n", seg_name);
break;
case -ENOSPC:
- PRINT_WARN("cannot load/query segment %s, "
- "overlaps with storage\n", seg_name);
+ pr_err("DCSS %s overlaps with used storage and cannot "
+ "be loaded\n", seg_name);
break;
case -EBUSY:
- PRINT_WARN("cannot load/query segment %s, "
- "overlaps with already loaded dcss\n", seg_name);
+ pr_err("%s needs used memory resources and cannot be "
+ "loaded or queried\n", seg_name);
break;
case -EPERM:
- PRINT_WARN("cannot load/query segment %s, "
- "already loaded in incompatible mode\n", seg_name);
+ pr_err("DCSS %s is already loaded in a different access "
+ "mode\n", seg_name);
break;
case -ENOMEM:
- PRINT_WARN("cannot load/query segment %s, "
- "out of memory\n", seg_name);
+ pr_err("There is not enough memory to load or query "
+ "DCSS %s\n", seg_name);
break;
case -ERANGE:
- PRINT_WARN("cannot load/query segment %s, "
- "exceeds kernel mapping range\n", seg_name);
+ pr_err("DCSS %s exceeds the kernel mapping range (%lu) "
+ "and cannot be loaded\n", seg_name, VMEM_MAX_PHYS);
break;
default:
- PRINT_WARN("cannot load/query segment %s, "
- "return value %i\n", seg_name, rc);
break;
}
}
diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
index b809f22ea638..ccb1d93bb043 100644
--- a/arch/sh/include/asm/elf.h
+++ b/arch/sh/include/asm/elf.h
@@ -202,7 +202,7 @@ do { \
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
- int executable_stack);
+ int uses_interp);
extern unsigned int vdso_enabled;
extern void __kernel_vsyscall;
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index 95f4de0800ec..3f7e415be86a 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -59,8 +59,7 @@ int __init vsyscall_init(void)
}
/* Setup a VMA at program startup for the vsyscall page */
-int arch_setup_additional_pages(struct linux_binprm *bprm,
- int executable_stack)
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
unsigned long addr;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 40ca1bea7916..f51a3ddde01a 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -325,7 +325,7 @@ struct linux_binprm;
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
- int executable_stack);
+ int uses_interp);
extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
#define compat_arch_setup_additional_pages syscall32_setup_pages
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index 513f330c5832..1241f118ab56 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -310,7 +310,7 @@ int __init sysenter_setup(void)
}
/* Setup a VMA at program startup for the vsyscall page */
-int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
unsigned long addr;
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 257ba4a10abf..9c98cc6ba978 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -98,7 +98,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
/* Setup a VMA at program startup for the vsyscall page.
Not called for compat tasks */
-int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
unsigned long addr;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 43d6ba83a191..8783457b93d3 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -622,6 +622,16 @@ config HVC_BEAT
help
Toshiba's Cell Reference Set Beat Console device driver
+config HVC_IUCV
+ bool "z/VM IUCV Hypervisor console support (VM only)"
+ depends on S390
+ select HVC_DRIVER
+ select IUCV
+ default y
+ help
+ This driver provides a Hypervisor console (HVC) back-end to access
+ a Linux (console) terminal via a z/VM IUCV communication path.
+
config HVC_XEN
bool "Xen Hypervisor Console support"
depends on XEN
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 438f71317c5c..36151bae0d72 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_HVC_BEAT) += hvc_beat.o
obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
obj-$(CONFIG_HVC_IRQ) += hvc_irq.o
obj-$(CONFIG_HVC_XEN) += hvc_xen.o
+obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
new file mode 100644
index 000000000000..5ea7d7713fca
--- /dev/null
+++ b/drivers/char/hvc_iucv.c
@@ -0,0 +1,850 @@
+/*
+ * hvc_iucv.c - z/VM IUCV back-end for the Hypervisor Console (HVC)
+ *
+ * This back-end for HVC provides terminal access via
+ * z/VM IUCV communication paths.
+ *
+ * Copyright IBM Corp. 2008.
+ *
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+#define KMSG_COMPONENT "hvc_iucv"
+
+#include <linux/types.h>
+#include <asm/ebcdic.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <net/iucv/iucv.h>
+
+#include "hvc_console.h"
+
+
+/* HVC backend for z/VM IUCV */
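+/* 0xc9e4c3e5 is the string "IUCV" in EBCDIC; it serves as the base vterm number */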
+#define HVC_IUCV_MAGIC 0xc9e4c3e5
+#define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS
+#define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
+
+/* IUCV TTY message */
+#define MSG_VERSION 0x02 /* Message version */
+#define MSG_TYPE_ERROR 0x01 /* Error message */
+#define MSG_TYPE_TERMENV 0x02 /* Terminal environment variable */
+#define MSG_TYPE_TERMIOS 0x04 /* Terminal IO struct update */
+#define MSG_TYPE_WINSIZE 0x08 /* Terminal window size update */
+#define MSG_TYPE_DATA 0x10 /* Terminal data */
+
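+/* Total wire length of a message: the iucv_tty_msg header plus s bytes of payload */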
+#define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data))
+struct iucv_tty_msg {
+ u8 version; /* Message version */
+ u8 type; /* Message type */
+#define MSG_MAX_DATALEN (~(u16)0)
+ u16 datalen; /* Payload length */
+ u8 data[]; /* Payload buffer */
+} __attribute__((packed));
+
+enum iucv_state_t {
+ IUCV_DISCONN = 0,
+ IUCV_CONNECTED = 1,
+ IUCV_SEVERED = 2,
+};
+
+enum tty_state_t {
+ TTY_CLOSED = 0,
+ TTY_OPENED = 1,
+};
+
+struct hvc_iucv_private {
+ struct hvc_struct *hvc; /* HVC console struct reference */
+ u8 srv_name[8]; /* IUCV service name (ebcdic) */
+ enum iucv_state_t iucv_state; /* IUCV connection status */
+ enum tty_state_t tty_state; /* TTY status */
+ struct iucv_path *path; /* IUCV path pointer */
+ spinlock_t lock; /* hvc_iucv_private lock */
+ struct list_head tty_outqueue; /* outgoing IUCV messages */
+ struct list_head tty_inqueue; /* incoming IUCV messages */
+};
+
+struct iucv_tty_buffer {
+ struct list_head list; /* list pointer */
+ struct iucv_message msg; /* store an incoming IUCV message */
+ size_t offset; /* data buffer offset */
+ struct iucv_tty_msg *mbuf; /* buffer to store input/output data */
+};
+
+/* IUCV callback handler */
+static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
+static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
+static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
+static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
+
+
+/* Kernel module parameters */
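+/* Number of HVC IUCV terminals to provide; 0 (the default) disables the backend */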
+static unsigned long hvc_iucv_devices;
+
+/* Array of allocated hvc iucv tty lines... */
+static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
+
+/* Kmem cache and mempool for iucv_tty_buffer elements */
+static struct kmem_cache *hvc_iucv_buffer_cache;
+static mempool_t *hvc_iucv_mempool;
+
+/* IUCV handler callback functions */
+static struct iucv_handler hvc_iucv_handler = {
+ .path_pending = hvc_iucv_path_pending,
+ .path_severed = hvc_iucv_path_severed,
+ .message_complete = hvc_iucv_msg_complete,
+ .message_pending = hvc_iucv_msg_pending,
+};
+
+
+/**
+ * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
+ * @num: The HVC virtual terminal number (vtermno)
+ *
+ * This function returns the struct hvc_iucv_private instance that corresponds
+ * to the HVC virtual terminal number specified as parameter @num.
+ */
+struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
+{
+ if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
+ return NULL;
+ return hvc_iucv_table[num - HVC_IUCV_MAGIC];
+}
+
+/**
+ * alloc_tty_buffer() - Returns a new struct iucv_tty_buffer element.
+ * @size: Size of the internal buffer used to store data.
+ * @flags: Memory allocation flags passed to mempool.
+ *
+ * This function allocates a new struct iucv_tty_buffer element and, optionally,
+ * allocates an internal data buffer with the specified size @size.
+ * Note: The total message size is the internal buffer size plus the size of
+ * the iucv_tty_msg header (see MSG_SIZE()).
+ *
+ * The function returns NULL if memory allocation has failed.
+ */
+static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
+{
+ struct iucv_tty_buffer *bufp;
+
+ bufp = mempool_alloc(hvc_iucv_mempool, flags);
+ if (!bufp)
+ return NULL;
+ memset(bufp, 0, sizeof(struct iucv_tty_buffer));
+
+ if (size > 0) {
+ bufp->msg.length = MSG_SIZE(size);
+ bufp->mbuf = kmalloc(bufp->msg.length, flags);
+ if (!bufp->mbuf) {
+ mempool_free(bufp, hvc_iucv_mempool);
+ return NULL;
+ }
+ bufp->mbuf->version = MSG_VERSION;
+ bufp->mbuf->type = MSG_TYPE_DATA;
+ bufp->mbuf->datalen = (u16) size;
+ }
+ return bufp;
+}
+
+/**
+ * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
+ * @bufp: Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
+ *
+ * The destroy_tty_buffer() function frees the internal data buffer and returns
+ * the struct iucv_tty_buffer element back to the mempool for freeing.
+ */
+static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
+{
+ kfree(bufp->mbuf);
+ mempool_free(bufp, hvc_iucv_mempool);
+}
+
+/**
+ * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
+ * @list: List head pointer to a list containing struct iucv_tty_buffer
+ * elements.
+ *
+ * Calls destroy_tty_buffer() for each struct iucv_tty_buffer element in the
+ * list @list.
+ */
+static void destroy_tty_buffer_list(struct list_head *list)
+{
+ struct iucv_tty_buffer *ent, *next;
+
+ list_for_each_entry_safe(ent, next, list, list) {
+ list_del(&ent->list);
+ destroy_tty_buffer(ent);
+ }
+}
+
+/**
+ * hvc_iucv_write() - Receive IUCV message and write data to HVC console buffer.
+ * @priv: Pointer to hvc_iucv_private structure.
+ * @buf: HVC console buffer for writing received terminal data.
+ * @count: HVC console buffer size.
+ * @has_more_data: Pointer to an int variable.
+ *
+ * The function picks up pending messages from the input queue and receives
+ * the message data that is then written to the specified buffer @buf.
+ * If the buffer size @count is less than the data message size, then the
+ * message is kept on the input queue and @has_more_data is set to 1.
+ * If the message data has been entirely written, the message is removed from
+ * the input queue.
+ *
+ * The function returns the number of bytes written to the terminal, zero if
+ * there are no pending data messages available or if there is no established
+ * IUCV path.
+ * If the IUCV path has been severed, then -EPIPE is returned to cause a
+ * hang up (that is issued by the HVC console layer).
+ */
+static int hvc_iucv_write(struct hvc_iucv_private *priv,
+ char *buf, int count, int *has_more_data)
+{
+ struct iucv_tty_buffer *rb;
+ int written;
+ int rc;
+
+ /* Immediately return if there is no IUCV connection */
+ if (priv->iucv_state == IUCV_DISCONN)
+ return 0;
+
+ /* If the IUCV path has been severed, return -EPIPE to inform the
+ * hvc console layer to hang up the tty device. */
+ if (priv->iucv_state == IUCV_SEVERED)
+ return -EPIPE;
+
+ /* check if there are pending messages */
+ if (list_empty(&priv->tty_inqueue))
+ return 0;
+
+	/* receive an IUCV message and flip data to the tty (ldisc) */
+ rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);
+
+ written = 0;
+ if (!rb->mbuf) { /* message not yet received ... */
+ /* allocate mem to store msg data; if no memory is available
+ * then leave the buffer on the list and re-try later */
+ rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC);
+ if (!rb->mbuf)
+ return -ENOMEM;
+
+ rc = __iucv_message_receive(priv->path, &rb->msg, 0,
+ rb->mbuf, rb->msg.length, NULL);
+ switch (rc) {
+ case 0: /* Successful */
+ break;
+ case 2: /* No message found */
+ case 9: /* Message purged */
+ break;
+ default:
+ written = -EIO;
+ }
+		/* remove buffer if an error has occurred or received data
+ * is not correct */
+ if (rc || (rb->mbuf->version != MSG_VERSION) ||
+ (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
+ goto out_remove_buffer;
+ }
+
+ switch (rb->mbuf->type) {
+ case MSG_TYPE_DATA:
+ written = min_t(int, rb->mbuf->datalen - rb->offset, count);
+ memcpy(buf, rb->mbuf->data + rb->offset, written);
+ if (written < (rb->mbuf->datalen - rb->offset)) {
+ rb->offset += written;
+ *has_more_data = 1;
+ goto out_written;
+ }
+ break;
+
+ case MSG_TYPE_WINSIZE:
+ if (rb->mbuf->datalen != sizeof(struct winsize))
+ break;
+ hvc_resize(priv->hvc, *((struct winsize *)rb->mbuf->data));
+ break;
+
+ case MSG_TYPE_ERROR: /* ignored ... */
+ case MSG_TYPE_TERMENV: /* ignored ... */
+ case MSG_TYPE_TERMIOS: /* ignored ... */
+ break;
+ }
+
+out_remove_buffer:
+ list_del(&rb->list);
+ destroy_tty_buffer(rb);
+ *has_more_data = !list_empty(&priv->tty_inqueue);
+
+out_written:
+ return written;
+}
+
+/**
+ * hvc_iucv_get_chars() - HVC get_chars operation.
+ * @vtermno: HVC virtual terminal number.
+ * @buf: Pointer to a buffer to store data
+ * @count: Size of buffer available for writing
+ *
+ * The hvc_console thread calls this method to read characters from
+ * the terminal backend. If an IUCV communication path has been established,
+ * pending IUCV messages are received and data is copied into buffer @buf
+ * up to @count bytes.
+ *
+ * Locking: The routine gets called under an irqsave() spinlock; and
+ * the routine locks the struct hvc_iucv_private->lock to call
+ * helper functions.
+ */
+static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
+{
+ struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
+ int written;
+ int has_more_data;
+
+ if (count <= 0)
+ return 0;
+
+ if (!priv)
+ return -ENODEV;
+
+ spin_lock(&priv->lock);
+ has_more_data = 0;
+ written = hvc_iucv_write(priv, buf, count, &has_more_data);
+ spin_unlock(&priv->lock);
+
+ /* if there are still messages on the queue... schedule another run */
+ if (has_more_data)
+ hvc_kick();
+
+ return written;
+}
+
+/**
+ * hvc_iucv_send() - Send an IUCV message containing terminal data.
+ * @priv: Pointer to struct hvc_iucv_private instance.
+ * @buf: Buffer containing data to send.
+ * @size: Size of buffer and amount of data to send.
+ *
+ * If an IUCV communication path is established, the function copies the buffer
+ * data to a newly allocated struct iucv_tty_buffer element, sends the data and
+ * puts the element on the outqueue.
+ *
+ * If there is no IUCV communication path established, the function returns 0.
+ * If an existing IUCV communication path has been severed, the function returns
+ * -EPIPE (can be passed to HVC layer to cause a tty hangup).
+ */
+static int hvc_iucv_send(struct hvc_iucv_private *priv, const char *buf,
+ int count)
+{
+ struct iucv_tty_buffer *sb;
+ int rc;
+ u16 len;
+
+ if (priv->iucv_state == IUCV_SEVERED)
+ return -EPIPE;
+
+ if (priv->iucv_state == IUCV_DISCONN)
+ return 0;
+
+ len = min_t(u16, MSG_MAX_DATALEN, count);
+
+ /* allocate internal buffer to store msg data and also compute total
+ * message length */
+ sb = alloc_tty_buffer(len, GFP_ATOMIC);
+ if (!sb)
+ return -ENOMEM;
+
+ sb->mbuf->datalen = len;
+ memcpy(sb->mbuf->data, buf, len);
+
+ list_add_tail(&sb->list, &priv->tty_outqueue);
+
+ rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
+ (void *) sb->mbuf, sb->msg.length);
+ if (rc) {
+ list_del(&sb->list);
+ destroy_tty_buffer(sb);
+ len = 0;
+ }
+
+ return len;
+}
+
+/**
+ * hvc_iucv_put_chars() - HVC put_chars operation.
+ * @vtermno: HVC virtual terminal number.
+ * @buf:    Pointer to a buffer to read data from
+ * @count: Size of buffer available for reading
+ *
+ * The hvc_console thread calls this method to write characters to the
+ * terminal backend.
+ * The function calls hvc_iucv_send() under the lock of the
+ * struct hvc_iucv_private instance that corresponds to the tty @vtermno.
+ *
+ * Locking: The method gets called under an irqsave() spinlock; and
+ * locks struct hvc_iucv_private->lock.
+ */
+static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
+{
+ struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
+ int sent;
+
+ if (count <= 0)
+ return 0;
+
+ if (!priv)
+ return -ENODEV;
+
+ spin_lock(&priv->lock);
+ sent = hvc_iucv_send(priv, buf, count);
+ spin_unlock(&priv->lock);
+
+ return sent;
+}
+
+/**
+ * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
+ * @hp: Pointer to the HVC device (struct hvc_struct)
+ * @id: Additional data (originally passed to hvc_alloc): the index of a struct
+ * hvc_iucv_private instance.
+ *
+ * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
+ * instance that is derived from @id. Always returns 0.
+ *
+ * Locking: struct hvc_iucv_private->lock, spin_lock_bh
+ */
+static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
+{
+ struct hvc_iucv_private *priv;
+
+ priv = hvc_iucv_get_private(id);
+ if (!priv)
+ return 0;
+
+ spin_lock_bh(&priv->lock);
+ priv->tty_state = TTY_OPENED;
+ spin_unlock_bh(&priv->lock);
+
+ return 0;
+}
+
+/**
+ * hvc_iucv_cleanup() - Clean up the terminal state when the tty is finally closed.
+ * @priv: Pointer to the struct hvc_iucv_private instance.
+ *
+ * The function destroys the struct iucv_tty_buffer elements on the in- and
+ * outqueues and resets the tty and IUCV states to TTY_CLOSED and IUCV_DISCONN.
+ */
+static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
+{
+ destroy_tty_buffer_list(&priv->tty_outqueue);
+ destroy_tty_buffer_list(&priv->tty_inqueue);
+
+ priv->tty_state = TTY_CLOSED;
+ priv->iucv_state = IUCV_DISCONN;
+}
+
+/**
+ * hvc_iucv_notifier_hangup() - HVC notifier for tty hangups.
+ * @hp: Pointer to the HVC device (struct hvc_struct)
+ * @id: Additional data (originally passed to hvc_alloc): the index of a struct
+ * hvc_iucv_private instance.
+ *
+ * This routine notifies the HVC backend that a tty hangup (carrier loss,
+ * virtual or otherwise) has occurred.
+ *
+ * The HVC backend for z/VM IUCV ignores virtual hangups (vhangup()), to keep
+ * an existing IUCV communication path established.
+ * (Background: vhangup() is called from user space (by getty or login) to
+ * disable writing to the tty by other applications).
+ *
+ * If the tty has been opened (e.g. getty) and an established IUCV path has been
+ * severed (we caused the tty hangup in that case), then the function invokes
+ * hvc_iucv_cleanup() to clean up.
+ *
+ * Locking: struct hvc_iucv_private->lock
+ */
+static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
+{
+ struct hvc_iucv_private *priv;
+
+ priv = hvc_iucv_get_private(id);
+ if (!priv)
+ return;
+
+ spin_lock_bh(&priv->lock);
+	/* NOTE: If the hangup was scheduled by ourselves (from the iucv
+	 *       path_severed callback [IUCV_SEVERED]), then we have to
+ * finally clean up the tty backend structure and set state to
+ * TTY_CLOSED.
+ *
+ * If the tty was hung up otherwise (e.g. vhangup()), then we
+ * ignore this hangup and keep an established IUCV path open...
+ * (...the reason is that we are not able to connect back to the
+ * client if we disconnect on hang up) */
+ priv->tty_state = TTY_CLOSED;
+
+ if (priv->iucv_state == IUCV_SEVERED)
+ hvc_iucv_cleanup(priv);
+ spin_unlock_bh(&priv->lock);
+}
+
+/**
+ * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
+ * @hp: Pointer to the HVC device (struct hvc_struct)
+ * @id: Additional data (originally passed to hvc_alloc):
+ * the index of a struct hvc_iucv_private instance.
+ *
+ * This routine notifies the HVC backend that the last tty device file
+ * descriptor has been closed.
+ * The function calls hvc_iucv_cleanup() to clean up the struct hvc_iucv_private
+ * instance.
+ *
+ * Locking: struct hvc_iucv_private->lock
+ */
+static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
+{
+ struct hvc_iucv_private *priv;
+ struct iucv_path *path;
+
+ priv = hvc_iucv_get_private(id);
+ if (!priv)
+ return;
+
+ spin_lock_bh(&priv->lock);
+ path = priv->path; /* save reference to IUCV path */
+ priv->path = NULL;
+ hvc_iucv_cleanup(priv);
+ spin_unlock_bh(&priv->lock);
+
+ /* sever IUCV path outside of priv->lock due to lock ordering of:
+ * priv->lock <--> iucv_table_lock */
+ if (path) {
+ iucv_path_sever(path, NULL);
+ iucv_path_free(path);
+ }
+}
+
+/**
+ * hvc_iucv_path_pending() - IUCV handler to process a connection request.
+ * @path: Pending path (struct iucv_path)
+ * @ipvmid: Originator z/VM system identifier
+ * @ipuser: User specified data for this path
+ * (AF_IUCV: port/service name and originator port)
+ *
+ * The function uses the @ipuser data to determine whether the pending
+ * path belongs to a terminal managed by this HVC backend.
+ * If the check is successful, an additional check ensures that a terminal
+ * cannot be accessed multiple times (only one connection to a terminal is
+ * allowed): if the terminal is already connected, the pending path is
+ * severed. If it is the first connection, the pending path is accepted and
+ * associated with the struct hvc_iucv_private. The IUCV state is updated
+ * to reflect that a communication path has been established.
+ *
+ * Returns 0 if the path belongs to a terminal managed by this HVC backend;
+ * otherwise returns -ENODEV in order to dispatch this path to other handlers.
+ *
+ * Locking: struct hvc_iucv_private->lock
+ */
+static int hvc_iucv_path_pending(struct iucv_path *path,
+ u8 ipvmid[8], u8 ipuser[16])
+{
+ struct hvc_iucv_private *priv;
+ u8 nuser_data[16];
+ int i, rc;
+
+ priv = NULL;
+ for (i = 0; i < hvc_iucv_devices; i++)
+ if (hvc_iucv_table[i] &&
+ (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
+ priv = hvc_iucv_table[i];
+ break;
+ }
+
+ if (!priv)
+ return -ENODEV;
+
+ spin_lock(&priv->lock);
+
+ /* If the terminal is already connected or being severed, then sever
+ * this path to enforce that there is only ONE established communication
+ * path per terminal. */
+ if (priv->iucv_state != IUCV_DISCONN) {
+ iucv_path_sever(path, ipuser);
+ iucv_path_free(path);
+ goto out_path_handled;
+ }
+
+ /* accept path */
+ memcpy(nuser_data, ipuser + 8, 8); /* remote service (for af_iucv) */
+ memcpy(nuser_data + 8, ipuser, 8); /* local service (for af_iucv) */
+ path->msglim = 0xffff; /* IUCV MSGLIMIT */
+ path->flags &= ~IUCV_IPRMDATA; /* TODO: use IUCV_IPRMDATA */
+ rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
+ if (rc) {
+ iucv_path_sever(path, ipuser);
+ iucv_path_free(path);
+ goto out_path_handled;
+ }
+ priv->path = path;
+ priv->iucv_state = IUCV_CONNECTED;
+
+out_path_handled:
+ spin_unlock(&priv->lock);
+ return 0;
+}
+
+/**
+ * hvc_iucv_path_severed() - IUCV handler to process a path sever.
+ * @path: Pending path (struct iucv_path)
+ * @ipuser: User specified data for this path
+ * (AF_IUCV: port/service name and originator port)
+ *
+ * The function also severs the path (as required by the IUCV protocol) and
+ * sets the iucv state to IUCV_SEVERED for the associated struct
+ * hvc_iucv_private instance. Later, the IUCV_SEVERED state triggers a tty
+ * hangup (hvc_iucv_get_chars() / hvc_iucv_write()).
+ *
+ * If the tty portion of the HVC is closed, the outqueue is cleaned up as well.
+ *
+ * Locking: struct hvc_iucv_private->lock
+ */
+static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
+{
+ struct hvc_iucv_private *priv = path->private;
+
+ spin_lock(&priv->lock);
+ priv->iucv_state = IUCV_SEVERED;
+
+ /* NOTE: If the tty has not yet been opened by a getty program
+ * (e.g. to see console messages), then cleanup the
+ * hvc_iucv_private structure to allow re-connects.
+ *
+ * If the tty has been opened, the get_chars() callback returns
+ * -EPIPE to signal the hvc console layer to hang up the tty. */
+ priv->path = NULL;
+ if (priv->tty_state == TTY_CLOSED)
+ hvc_iucv_cleanup(priv);
+ spin_unlock(&priv->lock);
+
+ /* finally sever path (outside of priv->lock due to lock ordering) */
+ iucv_path_sever(path, ipuser);
+ iucv_path_free(path);
+}
+
+/**
+ * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
+ * @path: Pending path (struct iucv_path)
+ * @msg: Pointer to the IUCV message
+ *
+ * The function stores an incoming message on the input queue for later
+ * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
+ * However, if the tty has not yet been opened, the message is rejected.
+ *
+ * Locking: struct hvc_iucv_private->lock
+ */
+static void hvc_iucv_msg_pending(struct iucv_path *path,
+ struct iucv_message *msg)
+{
+ struct hvc_iucv_private *priv = path->private;
+ struct iucv_tty_buffer *rb;
+
+ spin_lock(&priv->lock);
+
+ /* reject messages if tty has not yet been opened */
+ if (priv->tty_state == TTY_CLOSED) {
+ iucv_message_reject(path, msg);
+ goto unlock_return;
+ }
+
+	/* allocate an empty buffer element */
+ rb = alloc_tty_buffer(0, GFP_ATOMIC);
+ if (!rb) {
+ iucv_message_reject(path, msg);
+ goto unlock_return; /* -ENOMEM */
+ }
+ rb->msg = *msg;
+
+ list_add_tail(&rb->list, &priv->tty_inqueue);
+
+	hvc_kick();			/* wake up hvc console thread */
+
+unlock_return:
+ spin_unlock(&priv->lock);
+}
+
+/**
+ * hvc_iucv_msg_complete() - IUCV handler to process message completion
+ * @path: Pending path (struct iucv_path)
+ * @msg: Pointer to the IUCV message
+ *
+ * The function is called upon completion of message delivery and the
+ * message is removed from the outqueue. Additional delivery information
+ * can be found in msg->audit: rejected messages (0x040000 (IPADRJCT)) and
+ * purged messages (0x010000 (IPADPGNR)).
+ *
+ * Locking: struct hvc_iucv_private->lock
+ */
+static void hvc_iucv_msg_complete(struct iucv_path *path,
+ struct iucv_message *msg)
+{
+ struct hvc_iucv_private *priv = path->private;
+ struct iucv_tty_buffer *ent, *next;
+ LIST_HEAD(list_remove);
+
+ spin_lock(&priv->lock);
+ list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
+ if (ent->msg.id == msg->id) {
+ list_move(&ent->list, &list_remove);
+ break;
+ }
+ spin_unlock(&priv->lock);
+ destroy_tty_buffer_list(&list_remove);
+}
+
+
+/* HVC operations */
+static struct hv_ops hvc_iucv_ops = {
+ .get_chars = hvc_iucv_get_chars,
+ .put_chars = hvc_iucv_put_chars,
+ .notifier_add = hvc_iucv_notifier_add,
+ .notifier_del = hvc_iucv_notifier_del,
+ .notifier_hangup = hvc_iucv_notifier_hangup,
+};
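+/* Note: the hvc console core calls get_chars() when polling for input and
+ * put_chars() to emit output; hvc_kick() in hvc_iucv_msg_pending() wakes the
+ * polling thread as soon as new terminal data arrives. */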
+
+/**
+ * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
+ * @id: hvc_iucv_table index
+ *
+ * This function allocates a new hvc_iucv_private struct and puts the
+ * instance into hvc_iucv_table at index @id.
+ * Returns 0 on success; otherwise non-zero.
+ */
+static int __init hvc_iucv_alloc(int id)
+{
+ struct hvc_iucv_private *priv;
+ char name[9];
+ int rc;
+
+ priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+ INIT_LIST_HEAD(&priv->tty_outqueue);
+ INIT_LIST_HEAD(&priv->tty_inqueue);
+
+ /* Finally allocate hvc */
+ priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id,
+ HVC_IUCV_MAGIC + id, &hvc_iucv_ops, PAGE_SIZE);
+ if (IS_ERR(priv->hvc)) {
+ rc = PTR_ERR(priv->hvc);
+ kfree(priv);
+ return rc;
+ }
+
+ /* setup iucv related information */
+ snprintf(name, 9, "ihvc%-4d", id);
+ memcpy(priv->srv_name, name, 8);
+ ASCEBC(priv->srv_name, 8);
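+	/* srv_name holds "ihvc<id>", blank-padded to eight characters and
+	 * converted to EBCDIC, so that it can be matched against the EBCDIC
+	 * service name carried in ipuser[] on incoming connections
+	 * (see hvc_iucv_path_pending()). */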
+
+ hvc_iucv_table[id] = priv;
+ return 0;
+}
+
+/**
+ * hvc_iucv_init() - Initialization of HVC backend for z/VM IUCV
+ */
+static int __init hvc_iucv_init(void)
+{
+ int rc, i;
+
+ if (!MACHINE_IS_VM) {
+ pr_warning("The z/VM IUCV Hypervisor console cannot be "
+ "used without z/VM.\n");
+ return -ENODEV;
+ }
+
+ if (!hvc_iucv_devices)
+ return -ENODEV;
+
+ if (hvc_iucv_devices > MAX_HVC_IUCV_LINES)
+ return -EINVAL;
+
+ hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
+ sizeof(struct iucv_tty_buffer),
+ 0, 0, NULL);
+ if (!hvc_iucv_buffer_cache) {
+ pr_err("Not enough memory for driver initialization "
+ "(rs=%d).\n", 1);
+ return -ENOMEM;
+ }
+
+ hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
+ hvc_iucv_buffer_cache);
+ if (!hvc_iucv_mempool) {
+ pr_err("Not enough memory for driver initialization "
+ "(rs=%d).\n", 2);
+ kmem_cache_destroy(hvc_iucv_buffer_cache);
+ return -ENOMEM;
+ }
+
+ /* allocate hvc_iucv_private structs */
+ for (i = 0; i < hvc_iucv_devices; i++) {
+ rc = hvc_iucv_alloc(i);
+ if (rc) {
+ pr_err("Could not create new z/VM IUCV HVC backend "
+ "rc=%d.\n", rc);
+ goto out_error_hvc;
+ }
+ }
+
+ /* register IUCV callback handler */
+ rc = iucv_register(&hvc_iucv_handler, 0);
+ if (rc) {
+ pr_err("Could not register iucv handler (rc=%d).\n", rc);
+ goto out_error_iucv;
+ }
+
+ return 0;
+
+out_error_iucv:
+ iucv_unregister(&hvc_iucv_handler, 0);
+out_error_hvc:
+ for (i = 0; i < hvc_iucv_devices; i++)
+ if (hvc_iucv_table[i]) {
+ if (hvc_iucv_table[i]->hvc)
+ hvc_remove(hvc_iucv_table[i]->hvc);
+ kfree(hvc_iucv_table[i]);
+ }
+ mempool_destroy(hvc_iucv_mempool);
+ kmem_cache_destroy(hvc_iucv_buffer_cache);
+ return rc;
+}
+
+/**
+ * hvc_iucv_console_init() - Early console initialization
+ */
+static int __init hvc_iucv_console_init(void)
+{
+ if (!MACHINE_IS_VM || !hvc_iucv_devices)
+ return -ENODEV;
+ return hvc_instantiate(HVC_IUCV_MAGIC, 0, &hvc_iucv_ops);
+}
+
+/**
+ * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
+ * @val: Parameter value (numeric)
+ */
+static int __init hvc_iucv_config(char *val)
+{
+ return strict_strtoul(val, 10, &hvc_iucv_devices);
+}
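+
+/*
+ * Example: booting with "hvc_iucv=2" requests two IUCV HVC terminals.
+ * A value of 0 keeps the backend disabled (hvc_iucv_init() returns -ENODEV),
+ * and values above MAX_HVC_IUCV_LINES are rejected with -EINVAL.
+ */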
+
+
+module_init(hvc_iucv_init);
+console_initcall(hvc_iucv_console_init);
+__setup("hvc_iucv=", hvc_iucv_config);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("HVC back-end for z/VM IUCV.");
+MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>");
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 363bd1303d21..570ae59c1d5e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1898,15 +1898,19 @@ restart_cb:
wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
/* Process finished ERP request. */
if (cqr->refers) {
+ spin_lock_bh(&block->queue_lock);
__dasd_block_process_erp(block, cqr);
+ spin_unlock_bh(&block->queue_lock);
/* restart list_for_xx loop since dasd_process_erp
* might remove multiple elements */
goto restart_cb;
}
/* call the callback function */
+ spin_lock_irq(&block->request_queue_lock);
cqr->endclk = get_clock();
list_del_init(&cqr->blocklist);
__dasd_cleanup_cqr(cqr);
+ spin_unlock_irq(&block->request_queue_lock);
}
return rc;
}
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 921443b01d16..2ef25731d197 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -23,6 +23,7 @@
/* This is ugly... */
#define PRINTK_HEADER "dasd_devmap:"
+#define DASD_BUS_ID_SIZE 20
#include "dasd_int.h"
@@ -41,7 +42,7 @@ EXPORT_SYMBOL_GPL(dasd_page_cache);
*/
struct dasd_devmap {
struct list_head list;
- char bus_id[BUS_ID_SIZE];
+ char bus_id[DASD_BUS_ID_SIZE];
unsigned int devindex;
unsigned short features;
struct dasd_device *device;
@@ -94,7 +95,7 @@ dasd_hash_busid(const char *bus_id)
int hash, i;
hash = 0;
- for (i = 0; (i < BUS_ID_SIZE) && *bus_id; i++, bus_id++)
+ for (i = 0; (i < DASD_BUS_ID_SIZE) && *bus_id; i++, bus_id++)
hash += *bus_id;
return hash & 0xff;
}
@@ -301,7 +302,7 @@ dasd_parse_range( char *parsestring ) {
int from, from_id0, from_id1;
int to, to_id0, to_id1;
int features, rc;
- char bus_id[BUS_ID_SIZE+1], *str;
+ char bus_id[DASD_BUS_ID_SIZE+1], *str;
str = parsestring;
rc = dasd_busid(&str, &from_id0, &from_id1, &from);
@@ -407,14 +408,14 @@ dasd_add_busid(const char *bus_id, int features)
devmap = NULL;
hash = dasd_hash_busid(bus_id);
list_for_each_entry(tmp, &dasd_hashlists[hash], list)
- if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) {
+ if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) {
devmap = tmp;
break;
}
if (!devmap) {
/* This bus_id is new. */
new->devindex = dasd_max_devindex++;
- strncpy(new->bus_id, bus_id, BUS_ID_SIZE);
+ strncpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
new->features = features;
new->device = NULL;
list_add(&new->list, &dasd_hashlists[hash]);
@@ -439,7 +440,7 @@ dasd_find_busid(const char *bus_id)
devmap = ERR_PTR(-ENODEV);
hash = dasd_hash_busid(bus_id);
list_for_each_entry(tmp, &dasd_hashlists[hash], list) {
- if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) {
+ if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) {
devmap = tmp;
break;
}
@@ -561,7 +562,7 @@ dasd_create_device(struct ccw_device *cdev)
}
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
- cdev->dev.driver_data = device;
+ dev_set_drvdata(&cdev->dev, device);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
return device;
@@ -597,7 +598,7 @@ dasd_delete_device(struct dasd_device *device)
/* Disconnect dasd_device structure from ccw_device structure. */
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- device->cdev->dev.driver_data = NULL;
+ dev_set_drvdata(&device->cdev->dev, NULL);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
/*
@@ -638,7 +639,7 @@ dasd_put_device_wake(struct dasd_device *device)
struct dasd_device *
dasd_device_from_cdev_locked(struct ccw_device *cdev)
{
- struct dasd_device *device = cdev->dev.driver_data;
+ struct dasd_device *device = dev_get_drvdata(&cdev->dev);
if (!device)
return ERR_PTR(-ENODEV);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 2e60d5f968c8..bd2c52e20762 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1496,7 +1496,7 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
/* service information message SIM */
- if (irb->esw.esw0.erw.cons && (irb->ecw[27] & DASD_SENSE_BIT_0) &&
+ if (irb->esw.esw0.erw.cons && !(irb->ecw[27] & DASD_SENSE_BIT_0) &&
((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
dasd_3990_erp_handle_sim(device, irb->ecw);
dasd_schedule_device_bh(device);
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 9088de84b45d..bf6fd348f20e 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -180,12 +180,12 @@ dasd_calc_metrics(char *page, char **start, off_t off,
#ifdef CONFIG_DASD_PROFILE
static char *
-dasd_statistics_array(char *str, unsigned int *array, int shift)
+dasd_statistics_array(char *str, unsigned int *array, int factor)
{
int i;
for (i = 0; i < 32; i++) {
- str += sprintf(str, "%7d ", array[i] >> shift);
+ str += sprintf(str, "%7d ", array[i] / factor);
if (i == 15)
str += sprintf(str, "\n");
}
@@ -202,7 +202,7 @@ dasd_statistics_read(char *page, char **start, off_t off,
#ifdef CONFIG_DASD_PROFILE
struct dasd_profile_info_t *prof;
char *str;
- int shift;
+ int factor;
/* check for active profiling */
if (dasd_profile_level == DASD_PROFILE_OFF) {
@@ -214,12 +214,14 @@ dasd_statistics_read(char *page, char **start, off_t off,
prof = &dasd_global_profile;
 	/* prevent counter 'overflow' on output */
- for (shift = 0; (prof->dasd_io_reqs >> shift) > 9999999; shift++);
+ for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
+ factor *= 10);
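+	/* factor is now the smallest power of ten that scales dasd_io_reqs
+	 * down to at most seven digits */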
str = page;
str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs);
- str += sprintf(str, "with %d sectors(512B each)\n",
+ str += sprintf(str, "with %u sectors(512B each)\n",
prof->dasd_io_sects);
+ str += sprintf(str, "Scale Factor is %d\n", factor);
str += sprintf(str,
" __<4 ___8 __16 __32 __64 _128 "
" _256 _512 __1k __2k __4k __8k "
@@ -230,22 +232,22 @@ dasd_statistics_read(char *page, char **start, off_t off,
" __1G __2G __4G " " _>4G\n");
str += sprintf(str, "Histogram of sizes (512B secs)\n");
- str = dasd_statistics_array(str, prof->dasd_io_secs, shift);
+ str = dasd_statistics_array(str, prof->dasd_io_secs, factor);
str += sprintf(str, "Histogram of I/O times (microseconds)\n");
- str = dasd_statistics_array(str, prof->dasd_io_times, shift);
+ str = dasd_statistics_array(str, prof->dasd_io_times, factor);
str += sprintf(str, "Histogram of I/O times per sector\n");
- str = dasd_statistics_array(str, prof->dasd_io_timps, shift);
+ str = dasd_statistics_array(str, prof->dasd_io_timps, factor);
str += sprintf(str, "Histogram of I/O time till ssch\n");
- str = dasd_statistics_array(str, prof->dasd_io_time1, shift);
+ str = dasd_statistics_array(str, prof->dasd_io_time1, factor);
str += sprintf(str, "Histogram of I/O time between ssch and irq\n");
- str = dasd_statistics_array(str, prof->dasd_io_time2, shift);
+ str = dasd_statistics_array(str, prof->dasd_io_time2, factor);
str += sprintf(str, "Histogram of I/O time between ssch "
"and irq per sector\n");
- str = dasd_statistics_array(str, prof->dasd_io_time2ps, shift);
+ str = dasd_statistics_array(str, prof->dasd_io_time2ps, factor);
str += sprintf(str, "Histogram of I/O time between irq and end\n");
- str = dasd_statistics_array(str, prof->dasd_io_time3, shift);
+ str = dasd_statistics_array(str, prof->dasd_io_time3, factor);
str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n");
- str = dasd_statistics_array(str, prof->dasd_io_nr_req, shift);
+ str = dasd_statistics_array(str, prof->dasd_io_nr_req, factor);
len = str - page;
#else
len = sprintf(page, "Statistics are not activated in this kernel\n");
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 63f26a135fe5..26ffc6ab441d 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -4,6 +4,9 @@
* Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer
*/
+#define KMSG_COMPONENT "dcssblk"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ctype.h>
@@ -17,19 +20,10 @@
#include <linux/interrupt.h>
#include <asm/s390_rdev.h>
-//#define DCSSBLK_DEBUG /* Debug messages on/off */
#define DCSSBLK_NAME "dcssblk"
#define DCSSBLK_MINORS_PER_DISK 1
#define DCSSBLK_PARM_LEN 400
-
-#ifdef DCSSBLK_DEBUG
-#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSSBLK_NAME " debug: " x)
-#else
-#define PRINT_DEBUG(x...) do {} while (0)
-#endif
-#define PRINT_INFO(x...) printk(KERN_INFO DCSSBLK_NAME " info: " x)
-#define PRINT_WARN(x...) printk(KERN_WARNING DCSSBLK_NAME " warning: " x)
-#define PRINT_ERR(x...) printk(KERN_ERR DCSSBLK_NAME " error: " x)
+#define DCSS_BUS_ID_SIZE 20
static int dcssblk_open(struct block_device *bdev, fmode_t mode);
static int dcssblk_release(struct gendisk *disk, fmode_t mode);
@@ -50,7 +44,7 @@ static struct block_device_operations dcssblk_devops = {
struct dcssblk_dev_info {
struct list_head lh;
struct device dev;
- char segment_name[BUS_ID_SIZE];
+ char segment_name[DCSS_BUS_ID_SIZE];
atomic_t use_count;
struct gendisk *gd;
unsigned long start;
@@ -65,7 +59,7 @@ struct dcssblk_dev_info {
struct segment_info {
struct list_head lh;
- char segment_name[BUS_ID_SIZE];
+ char segment_name[DCSS_BUS_ID_SIZE];
unsigned long start;
unsigned long end;
int segment_type;
@@ -261,10 +255,9 @@ dcssblk_is_continuous(struct dcssblk_dev_info *dev_info)
/* check continuity */
for (i = 0; i < dev_info->num_of_segments - 1; i++) {
if ((sort_list[i].end + 1) != sort_list[i+1].start) {
- PRINT_ERR("Segment %s is not contiguous with "
- "segment %s\n",
- sort_list[i].segment_name,
- sort_list[i+1].segment_name);
+ pr_err("Adjacent DCSSs %s and %s are not "
+ "contiguous\n", sort_list[i].segment_name,
+ sort_list[i+1].segment_name);
rc = -EINVAL;
goto out;
}
@@ -275,10 +268,10 @@ dcssblk_is_continuous(struct dcssblk_dev_info *dev_info)
!(sort_list[i+1].segment_type &
SEGMENT_EXCLUSIVE) ||
(sort_list[i+1].segment_type == SEG_TYPE_ER)) {
- PRINT_ERR("Segment %s has different type from "
- "segment %s\n",
- sort_list[i].segment_name,
- sort_list[i+1].segment_name);
+ pr_err("DCSS %s and DCSS %s have "
+ "incompatible types\n",
+ sort_list[i].segment_name,
+ sort_list[i+1].segment_name);
rc = -EINVAL;
goto out;
}
@@ -380,8 +373,9 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
} else if (inbuf[0] == '0') {
/* reload segments in exclusive mode */
if (dev_info->segment_type == SEG_TYPE_SC) {
- PRINT_ERR("Segment type SC (%s) cannot be loaded in "
- "non-shared mode\n", dev_info->segment_name);
+ pr_err("DCSS %s is of type SC and cannot be "
+ "loaded as exclusive-writable\n",
+ dev_info->segment_name);
rc = -EINVAL;
goto out;
}
@@ -404,9 +398,8 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
goto out;
removeseg:
- PRINT_ERR("Could not reload segment(s) of the device %s, removing "
- "segment(s) now!\n",
- dev_info->segment_name);
+ pr_err("DCSS device %s is removed after a failed access mode "
+ "change\n", dev_info->segment_name);
temp = entry;
list_for_each_entry(entry, &dev_info->seg_list, lh) {
if (entry != temp)
@@ -454,17 +447,17 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
if (inbuf[0] == '1') {
if (atomic_read(&dev_info->use_count) == 0) {
// device is idle => we save immediately
- PRINT_INFO("Saving segment(s) of the device %s\n",
- dev_info->segment_name);
+ pr_info("All DCSSs that map to device %s are "
+ "saved\n", dev_info->segment_name);
list_for_each_entry(entry, &dev_info->seg_list, lh) {
segment_save(entry->segment_name);
}
} else {
// device is busy => we save it when it becomes
// idle in dcssblk_release
- PRINT_INFO("Device %s is currently busy, segment(s) "
- "will be saved when it becomes idle...\n",
- dev_info->segment_name);
+ pr_info("Device %s is in use, its DCSSs will be "
+ "saved when it becomes idle\n",
+ dev_info->segment_name);
dev_info->save_pending = 1;
}
} else if (inbuf[0] == '0') {
@@ -472,9 +465,9 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
// device is busy & the user wants to undo his save
// request
dev_info->save_pending = 0;
- PRINT_INFO("Pending save for segment(s) of the device "
- "%s deactivated\n",
- dev_info->segment_name);
+ pr_info("A pending save request for device %s "
+ "has been canceled\n",
+ dev_info->segment_name);
}
} else {
up_write(&dcssblk_devices_sem);
@@ -614,9 +607,8 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
seg_byte_size = (dev_info->end - dev_info->start + 1);
set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
- PRINT_INFO("Loaded segment(s) %s, size = %lu Byte, "
- "capacity = %lu (512 Byte) sectors\n", local_buf,
- seg_byte_size, seg_byte_size >> 9);
+ pr_info("Loaded %s with total size %lu bytes and capacity %lu "
+ "sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9);
dev_info->save_pending = 0;
dev_info->is_shared = 1;
@@ -744,13 +736,15 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
dev_info = dcssblk_get_device_by_name(local_buf);
if (dev_info == NULL) {
up_write(&dcssblk_devices_sem);
- PRINT_WARN("Device %s is not loaded!\n", local_buf);
+ pr_warning("Device %s cannot be removed because it is not a "
+ "known device\n", local_buf);
rc = -ENODEV;
goto out_buf;
}
if (atomic_read(&dev_info->use_count) != 0) {
up_write(&dcssblk_devices_sem);
- PRINT_WARN("Device %s is in use!\n", local_buf);
+ pr_warning("Device %s cannot be removed while it is in "
+ "use\n", local_buf);
rc = -EBUSY;
goto out_buf;
}
@@ -807,8 +801,8 @@ dcssblk_release(struct gendisk *disk, fmode_t mode)
down_write(&dcssblk_devices_sem);
if (atomic_dec_and_test(&dev_info->use_count)
&& (dev_info->save_pending)) {
- PRINT_INFO("Device %s became idle and is being saved now\n",
- dev_info->segment_name);
+ pr_info("Device %s has become idle and is being saved "
+ "now\n", dev_info->segment_name);
list_for_each_entry(entry, &dev_info->seg_list, lh) {
segment_save(entry->segment_name);
}
@@ -851,7 +845,8 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
case SEG_TYPE_SC:
/* cannot write to these segments */
if (bio_data_dir(bio) == WRITE) {
- PRINT_WARN("rejecting write to ro device %s\n",
+ pr_warning("Writing to %s failed because it "
+ "is a read-only device\n",
dev_name(&dev_info->dev));
goto fail;
}
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 03916989ed2d..76814f3e898a 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -25,6 +25,9 @@
* generic hard disk support to replace ad-hoc partitioning
*/
+#define KMSG_COMPONENT "xpram"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ctype.h> /* isdigit, isxdigit */
@@ -42,12 +45,6 @@
#define XPRAM_DEVS 1 /* one partition */
#define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */
-#define PRINT_DEBUG(x...) printk(KERN_DEBUG XPRAM_NAME " debug:" x)
-#define PRINT_INFO(x...) printk(KERN_INFO XPRAM_NAME " info:" x)
-#define PRINT_WARN(x...) printk(KERN_WARNING XPRAM_NAME " warning:" x)
-#define PRINT_ERR(x...) printk(KERN_ERR XPRAM_NAME " error:" x)
-
-
typedef struct {
unsigned int size; /* size of xpram segment in pages */
unsigned int offset; /* start page of xpram segment */
@@ -264,7 +261,7 @@ static int __init xpram_setup_sizes(unsigned long pages)
/* Check number of devices. */
if (devs <= 0 || devs > XPRAM_MAX_DEVS) {
- PRINT_ERR("invalid number %d of devices\n",devs);
+ pr_err("%d is not a valid number of XPRAM devices\n",devs);
return -EINVAL;
}
xpram_devs = devs;
@@ -295,22 +292,22 @@ static int __init xpram_setup_sizes(unsigned long pages)
mem_auto_no++;
}
- PRINT_INFO(" number of devices (partitions): %d \n", xpram_devs);
+ pr_info(" number of devices (partitions): %d \n", xpram_devs);
for (i = 0; i < xpram_devs; i++) {
if (xpram_sizes[i])
- PRINT_INFO(" size of partition %d: %u kB\n",
- i, xpram_sizes[i]);
+ pr_info(" size of partition %d: %u kB\n",
+ i, xpram_sizes[i]);
else
- PRINT_INFO(" size of partition %d to be set "
- "automatically\n",i);
+ pr_info(" size of partition %d to be set "
+ "automatically\n",i);
}
- PRINT_DEBUG(" memory needed (for sized partitions): %lu kB\n",
- mem_needed);
- PRINT_DEBUG(" partitions to be sized automatically: %d\n",
- mem_auto_no);
+ pr_info(" memory needed (for sized partitions): %lu kB\n",
+ mem_needed);
+ pr_info(" partitions to be sized automatically: %d\n",
+ mem_auto_no);
if (mem_needed > pages * 4) {
- PRINT_ERR("Not enough expanded memory available\n");
+ pr_err("Not enough expanded memory available\n");
return -EINVAL;
}
@@ -322,8 +319,8 @@ static int __init xpram_setup_sizes(unsigned long pages)
*/
if (mem_auto_no) {
mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4;
- PRINT_INFO(" automatically determined "
- "partition size: %lu kB\n", mem_auto);
+ pr_info(" automatically determined "
+ "partition size: %lu kB\n", mem_auto);
for (i = 0; i < xpram_devs; i++)
if (xpram_sizes[i] == 0)
xpram_sizes[i] = mem_auto;
@@ -405,12 +402,12 @@ static int __init xpram_init(void)
/* Find out size of expanded memory. */
if (xpram_present() != 0) {
- PRINT_WARN("No expanded memory available\n");
+ pr_err("No expanded memory available\n");
return -ENODEV;
}
xpram_pages = xpram_highest_page_index() + 1;
- PRINT_INFO(" %u pages expanded memory found (%lu KB).\n",
- xpram_pages, (unsigned long) xpram_pages*4);
+ pr_info(" %u pages expanded memory found (%lu KB).\n",
+ xpram_pages, (unsigned long) xpram_pages*4);
rc = xpram_setup_sizes(xpram_pages);
if (rc)
return rc;
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 35fd8dfcaaa6..97e63cf46944 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -7,6 +7,9 @@
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
+#define KMSG_COMPONENT "monreader"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
@@ -24,19 +27,6 @@
#include <asm/ebcdic.h>
#include <asm/extmem.h>
-//#define MON_DEBUG /* Debug messages on/off */
-
-#define MON_NAME "monreader"
-
-#define P_INFO(x...) printk(KERN_INFO MON_NAME " info: " x)
-#define P_ERROR(x...) printk(KERN_ERR MON_NAME " error: " x)
-#define P_WARNING(x...) printk(KERN_WARNING MON_NAME " warning: " x)
-
-#ifdef MON_DEBUG
-#define P_DEBUG(x...) printk(KERN_DEBUG MON_NAME " debug: " x)
-#else
-#define P_DEBUG(x...) do {} while (0)
-#endif
#define MON_COLLECT_SAMPLE 0x80
#define MON_COLLECT_EVENT 0x40
@@ -172,7 +162,7 @@ static int mon_send_reply(struct mon_msg *monmsg,
} else
monmsg->replied_msglim = 1;
if (rc) {
- P_ERROR("read, IUCV reply failed with rc = %i\n\n", rc);
+ pr_err("Reading monitor data failed with rc=%i\n", rc);
return -EIO;
}
return 0;
@@ -251,7 +241,8 @@ static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
struct mon_private *monpriv = path->private;
- P_ERROR("IUCV connection severed with rc = 0x%X\n", ipuser[0]);
+ pr_err("z/VM *MONITOR system service disconnected with rc=%i\n",
+ ipuser[0]);
iucv_path_sever(path, NULL);
atomic_set(&monpriv->iucv_severed, 1);
wake_up(&mon_conn_wait_queue);
@@ -266,8 +257,7 @@ static void mon_iucv_message_pending(struct iucv_path *path,
memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
msg, sizeof(*msg));
if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
- P_WARNING("IUCV message pending, message limit (%i) reached\n",
- MON_MSGLIM);
+ pr_warning("The read queue for monitor data is full\n");
monpriv->msg_array[monpriv->write_index]->msglim_reached = 1;
}
monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM;
@@ -311,8 +301,8 @@ static int mon_open(struct inode *inode, struct file *filp)
rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
MON_SERVICE, NULL, user_data_connect, monpriv);
if (rc) {
- P_ERROR("iucv connection to *MONITOR failed with "
- "IPUSER SEVER code = %i\n", rc);
+ pr_err("Connecting to the z/VM *MONITOR system service "
+ "failed with rc=%i\n", rc);
rc = -EIO;
goto out_path;
}
@@ -353,7 +343,8 @@ static int mon_close(struct inode *inode, struct file *filp)
*/
rc = iucv_path_sever(monpriv->path, user_data_sever);
if (rc)
- P_ERROR("close, iucv_sever failed with rc = %i\n", rc);
+ pr_warning("Disconnecting the z/VM *MONITOR system service "
+ "failed with rc=%i\n", rc);
atomic_set(&monpriv->iucv_severed, 0);
atomic_set(&monpriv->iucv_connected, 0);
@@ -469,7 +460,8 @@ static int __init mon_init(void)
int rc;
if (!MACHINE_IS_VM) {
- P_ERROR("not running under z/VM, driver not loaded\n");
+ pr_err("The z/VM *MONITOR record device driver cannot be "
+ "loaded without z/VM\n");
return -ENODEV;
}
@@ -478,7 +470,8 @@ static int __init mon_init(void)
*/
rc = iucv_register(&monreader_iucv_handler, 1);
if (rc) {
- P_ERROR("failed to register with iucv driver\n");
+ pr_err("The z/VM *MONITOR record device driver failed to "
+ "register with IUCV\n");
return rc;
}
@@ -488,8 +481,8 @@ static int __init mon_init(void)
goto out_iucv;
}
if (rc != SEG_TYPE_SC) {
- P_ERROR("segment %s has unsupported type, should be SC\n",
- mon_dcss_name);
+ pr_err("The specified *MONITOR DCSS %s does not have the "
+ "required type SC\n", mon_dcss_name);
rc = -EINVAL;
goto out_iucv;
}
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 4d71aa8c1a79..c7d7483bab9a 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -8,6 +8,9 @@
* Author(s): Melissa Howland <Melissa.Howland@us.ibm.com>
*/
+#define KMSG_COMPONENT "monwriter"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
@@ -64,9 +67,9 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen);
if (rc <= 0)
return rc;
+ pr_err("Writing monitor data failed with rc=%i\n", rc);
if (rc == 5)
return -EPERM;
- printk("DIAG X'DC' error with return code: %i\n", rc);
return -EINVAL;
}
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index ec9c0bcf66ee..506390496416 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -6,6 +6,9 @@
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
+#define KMSG_COMPONENT "sclp_cmd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
@@ -16,9 +19,8 @@
#include <linux/memory.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
-#include "sclp.h"
-#define TAG "sclp_cmd: "
+#include "sclp.h"
#define SCLP_CMDW_READ_SCP_INFO 0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
@@ -169,8 +171,8 @@ static int do_sync_request(sclp_cmdw_t cmd, void *sccb)
/* Check response. */
if (request->status != SCLP_REQ_DONE) {
- printk(KERN_WARNING TAG "sync request failed "
- "(cmd=0x%08x, status=0x%02x)\n", cmd, request->status);
+ pr_warning("sync request failed (cmd=0x%08x, "
+ "status=0x%02x)\n", cmd, request->status);
rc = -EIO;
}
out:
@@ -224,8 +226,8 @@ int sclp_get_cpu_info(struct sclp_cpu_info *info)
if (rc)
goto out;
if (sccb->header.response_code != 0x0010) {
- printk(KERN_WARNING TAG "readcpuinfo failed "
- "(response=0x%04x)\n", sccb->header.response_code);
+ pr_warning("readcpuinfo failed (response=0x%04x)\n",
+ sccb->header.response_code);
rc = -EIO;
goto out;
}
@@ -262,8 +264,9 @@ static int do_cpu_configure(sclp_cmdw_t cmd)
case 0x0120:
break;
default:
- printk(KERN_WARNING TAG "configure cpu failed (cmd=0x%08x, "
- "response=0x%04x)\n", cmd, sccb->header.response_code);
+ pr_warning("configure cpu failed (cmd=0x%08x, "
+ "response=0x%04x)\n", cmd,
+ sccb->header.response_code);
rc = -EIO;
break;
}
@@ -626,9 +629,9 @@ static int do_chp_configure(sclp_cmdw_t cmd)
case 0x0450:
break;
default:
- printk(KERN_WARNING TAG "configure channel-path failed "
- "(cmd=0x%08x, response=0x%04x)\n", cmd,
- sccb->header.response_code);
+ pr_warning("configure channel-path failed "
+ "(cmd=0x%08x, response=0x%04x)\n", cmd,
+ sccb->header.response_code);
rc = -EIO;
break;
}
@@ -695,8 +698,8 @@ int sclp_chp_read_info(struct sclp_chp_info *info)
if (rc)
goto out;
if (sccb->header.response_code != 0x0010) {
- printk(KERN_WARNING TAG "read channel-path info failed "
- "(response=0x%04x)\n", sccb->header.response_code);
+ pr_warning("read channel-path info failed "
+ "(response=0x%04x)\n", sccb->header.response_code);
rc = -EIO;
goto out;
}
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 4cebd6ee6d27..b497afe061cc 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -5,15 +5,17 @@
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
+#define KMSG_COMPONENT "sclp_config"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cpu.h>
#include <linux/sysdev.h>
#include <linux/workqueue.h>
#include <asm/smp.h>
-#include "sclp.h"
-#define TAG "sclp_config: "
+#include "sclp.h"
struct conf_mgm_data {
u8 reserved;
@@ -31,7 +33,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
int cpu;
struct sys_device *sysdev;
- printk(KERN_WARNING TAG "cpu capability changed.\n");
+ pr_warning("cpu capability changed.\n");
get_online_cpus();
for_each_online_cpu(cpu) {
sysdev = get_cpu_sysdev(cpu);
@@ -78,7 +80,7 @@ static int __init sclp_conf_init(void)
return rc;
if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) {
- printk(KERN_WARNING TAG "no configuration management.\n");
+ pr_warning("no configuration management.\n");
sclp_unregister(&sclp_conf_register);
rc = -ENOSYS;
}
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index d887bd261d28..62c2647f37f4 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -7,6 +7,9 @@
* Michael Ernst <mernst@de.ibm.com>
*/
+#define KMSG_COMPONENT "sclp_cpi"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/stat.h>
@@ -20,6 +23,7 @@
#include <linux/completion.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
+
#include "sclp.h"
#include "sclp_rw.h"
#include "sclp_cpi_sys.h"
@@ -150,16 +154,16 @@ static int cpi_req(void)
wait_for_completion(&completion);
if (req->status != SCLP_REQ_DONE) {
- printk(KERN_WARNING "cpi: request failed (status=0x%02x)\n",
- req->status);
+ pr_warning("request failed (status=0x%02x)\n",
+ req->status);
rc = -EIO;
goto out_free_req;
}
response = ((struct cpi_sccb *) req->sccb)->header.response_code;
if (response != 0x0020) {
- printk(KERN_WARNING "cpi: failed with "
- "response code 0x%x\n", response);
+ pr_warning("request failed with response code 0x%x\n",
+ response);
rc = -EIO;
}
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 8b854857ba07..6a1c58dc61a7 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -5,15 +5,18 @@
* Author(s): Michael Holzheu
*/
+#define KMSG_COMPONENT "sclp_sdias"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/sched.h>
#include <asm/sclp.h>
#include <asm/debug.h>
#include <asm/ipl.h>
+
#include "sclp.h"
#include "sclp_rw.h"
#define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x)
-#define ERROR_MSG(x...) printk ( KERN_ALERT "SDIAS: " x )
#define SDIAS_RETRIES 300
#define SDIAS_SLEEP_TICKS 50
@@ -131,7 +134,7 @@ int sclp_sdias_blk_count(void)
rc = sdias_sclp_send(&request);
if (rc) {
- ERROR_MSG("sclp_send failed for get_nr_blocks\n");
+ pr_err("sclp_send failed for get_nr_blocks\n");
goto out;
}
if (sccb.hdr.response_code != 0x0020) {
@@ -145,7 +148,8 @@ int sclp_sdias_blk_count(void)
rc = sccb.evbuf.blk_cnt;
break;
default:
- ERROR_MSG("SCLP error: %x\n", sccb.evbuf.event_status);
+ pr_err("SCLP error: %x\n",
+ sccb.evbuf.event_status);
rc = -EIO;
goto out;
}
@@ -201,7 +205,7 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
rc = sdias_sclp_send(&request);
if (rc) {
- ERROR_MSG("sclp_send failed: %x\n", rc);
+ pr_err("sclp_send failed: %x\n", rc);
goto out;
}
if (sccb.hdr.response_code != 0x0020) {
@@ -219,9 +223,9 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
case EVSTATE_NO_DATA:
TRACE("no data\n");
default:
- ERROR_MSG("Error from SCLP while copying hsa. "
- "Event status = %x\n",
- sccb.evbuf.event_status);
+ pr_err("Error from SCLP while copying hsa. "
+ "Event status = %x\n",
+ sccb.evbuf.event_status);
rc = -EIO;
}
out:
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 9854f19f5e62..a839aa531d7c 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -583,23 +583,6 @@ sclp_vt220_chars_in_buffer(struct tty_struct *tty)
return count;
}
-static void
-__sclp_vt220_flush_buffer(void)
-{
- unsigned long flags;
-
- sclp_vt220_emit_current();
- spin_lock_irqsave(&sclp_vt220_lock, flags);
- if (timer_pending(&sclp_vt220_timer))
- del_timer(&sclp_vt220_timer);
- while (sclp_vt220_outqueue_count > 0) {
- spin_unlock_irqrestore(&sclp_vt220_lock, flags);
- sclp_sync_wait();
- spin_lock_irqsave(&sclp_vt220_lock, flags);
- }
- spin_unlock_irqrestore(&sclp_vt220_lock, flags);
-}
-
/*
* Pass on all buffers to the hardware. Return only when there are no more
* buffers pending.
@@ -745,6 +728,22 @@ sclp_vt220_con_device(struct console *c, int *index)
return sclp_vt220_driver;
}
+static void __sclp_vt220_flush_buffer(void)
+{
+ unsigned long flags;
+
+ sclp_vt220_emit_current();
+ spin_lock_irqsave(&sclp_vt220_lock, flags);
+ if (timer_pending(&sclp_vt220_timer))
+ del_timer(&sclp_vt220_timer);
+ while (sclp_vt220_outqueue_count > 0) {
+ spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+ sclp_sync_wait();
+ spin_lock_irqsave(&sclp_vt220_lock, flags);
+ }
+ spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+}
+
static int
sclp_vt220_notify(struct notifier_block *self,
unsigned long event, void *data)
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 09e7d9bf438b..a6087cec55b4 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -11,12 +11,14 @@
* The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS
*/
+#define KMSG_COMPONENT "vmcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
-#include <linux/smp_lock.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/uaccess.h>
@@ -26,8 +28,6 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>");
MODULE_DESCRIPTION("z/VM CP interface");
-#define PRINTK_HEADER "vmcp: "
-
static debug_info_t *vmcp_debug;
static int vmcp_open(struct inode *inode, struct file *file)
@@ -41,13 +41,11 @@ static int vmcp_open(struct inode *inode, struct file *file)
if (!session)
return -ENOMEM;
- lock_kernel();
session->bufsize = PAGE_SIZE;
session->response = NULL;
session->resp_size = 0;
mutex_init(&session->mutex);
file->private_data = session;
- unlock_kernel();
return nonseekable_open(inode, file);
}
@@ -193,7 +191,8 @@ static int __init vmcp_init(void)
int ret;
if (!MACHINE_IS_VM) {
- PRINT_WARN("z/VM CP interface is only available under z/VM\n");
+ pr_warning("The z/VM CP interface device driver cannot be "
+ "loaded without z/VM\n");
return -ENODEV;
}
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 24762727bc27..aabbeb909cc6 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -10,6 +10,10 @@
* Stefan Weinhuber <wein@de.ibm.com>
*
*/
+
+#define KMSG_COMPONENT "vmlogrdr"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
@@ -28,8 +32,6 @@
#include <linux/smp_lock.h>
#include <linux/string.h>
-
-
MODULE_AUTHOR
("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
" Stefan Weinhuber (wein@de.ibm.com)");
@@ -174,8 +176,7 @@ static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
struct vmlogrdr_priv_t * logptr = path->private;
u8 reason = (u8) ipuser[8];
- printk (KERN_ERR "vmlogrdr: connection severed with"
- " reason %i\n", reason);
+ pr_err("vmlogrdr: connection severed with reason %i\n", reason);
iucv_path_sever(path, NULL);
kfree(path);
@@ -333,8 +334,8 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
if (logptr->autorecording) {
ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
if (ret)
- printk (KERN_WARNING "vmlogrdr: failed to start "
- "recording automatically\n");
+ pr_warning("vmlogrdr: failed to start "
+ "recording automatically\n");
}
/* create connection to the system service */
@@ -345,9 +346,9 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
logptr->system_service, NULL, NULL,
logptr);
if (connect_rc) {
- printk (KERN_ERR "vmlogrdr: iucv connection to %s "
- "failed with rc %i \n", logptr->system_service,
- connect_rc);
+ pr_err("vmlogrdr: iucv connection to %s "
+ "failed with rc %i \n",
+ logptr->system_service, connect_rc);
goto out_path;
}
@@ -388,8 +389,8 @@ static int vmlogrdr_release (struct inode *inode, struct file *filp)
if (logptr->autorecording) {
ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
if (ret)
- printk (KERN_WARNING "vmlogrdr: failed to stop "
- "recording automatically\n");
+ pr_warning("vmlogrdr: failed to stop "
+ "recording automatically\n");
}
logptr->dev_in_use = 0;
@@ -823,8 +824,7 @@ static int __init vmlogrdr_init(void)
dev_t dev;
if (! MACHINE_IS_VM) {
- printk (KERN_ERR "vmlogrdr: not running under VM, "
- "driver not loaded.\n");
+ pr_err("not running under VM, driver not loaded.\n");
return -ENODEV;
}
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 9020eba620ee..5dcef81fc9d9 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -8,6 +8,9 @@
* Frank Munzert <munzert@de.ibm.com>
*/
+#define KMSG_COMPONENT "vmur"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/cdev.h>
#include <linux/smp_lock.h>
@@ -40,8 +43,6 @@ MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");
-#define PRINTK_HEADER "vmur: "
-
static dev_t ur_first_dev_maj_min;
static struct class *vmur_class;
static struct debug_info *vmur_dbf;
@@ -987,7 +988,8 @@ static int __init ur_init(void)
dev_t dev;
if (!MACHINE_IS_VM) {
- PRINT_ERR("%s is only available under z/VM.\n", ur_banner);
+ pr_err("The %s cannot be loaded without z/VM\n",
+ ur_banner);
return -ENODEV;
}
@@ -1006,7 +1008,8 @@ static int __init ur_init(void)
rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
if (rc) {
- PRINT_ERR("alloc_chrdev_region failed: err = %d\n", rc);
+ pr_err("Kernel function alloc_chrdev_region failed with "
+ "error code %d\n", rc);
goto fail_unregister_driver;
}
ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);
@@ -1016,7 +1019,7 @@ static int __init ur_init(void)
rc = PTR_ERR(vmur_class);
goto fail_unregister_region;
}
- PRINT_INFO("%s loaded.\n", ur_banner);
+ pr_info("%s loaded.\n", ur_banner);
return 0;
fail_unregister_region:
@@ -1034,7 +1037,7 @@ static void __exit ur_exit(void)
unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
ccw_driver_unregister(&ur_driver);
debug_unregister(vmur_dbf);
- PRINT_INFO("%s unloaded.\n", ur_banner);
+ pr_info("%s unloaded.\n", ur_banner);
}
module_init(ur_init);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 7fd84be11931..eefc6611412e 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -9,6 +9,9 @@
* Author(s): Michael Holzheu
*/
+#define KMSG_COMPONENT "zdump"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/utsname.h>
@@ -24,8 +27,6 @@
#include "sclp.h"
#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
-#define MSG(x...) printk( KERN_ALERT x )
-#define ERROR_MSG(x...) printk ( KERN_ALERT "DUMP: " x )
#define TO_USER 0
#define TO_KERNEL 1
@@ -563,19 +564,19 @@ static int __init sys_info_init(enum arch_id arch)
switch (arch) {
case ARCH_S390X:
- MSG("DETECTED 'S390X (64 bit) OS'\n");
+ pr_alert("DETECTED 'S390X (64 bit) OS'\n");
sys_info.sa_base = SAVE_AREA_BASE_S390X;
sys_info.sa_size = sizeof(struct save_area_s390x);
set_s390x_lc_mask(&sys_info.lc_mask);
break;
case ARCH_S390:
- MSG("DETECTED 'S390 (32 bit) OS'\n");
+ pr_alert("DETECTED 'S390 (32 bit) OS'\n");
sys_info.sa_base = SAVE_AREA_BASE_S390;
sys_info.sa_size = sizeof(struct save_area_s390);
set_s390_lc_mask(&sys_info.lc_mask);
break;
default:
- ERROR_MSG("unknown architecture 0x%x.\n",arch);
+ pr_alert("0x%x is an unknown architecture.\n",arch);
return -EINVAL;
}
sys_info.arch = arch;
@@ -674,7 +675,8 @@ static int __init zcore_init(void)
#ifndef __s390x__
if (arch == ARCH_S390X) {
- ERROR_MSG("32 bit dumper can't dump 64 bit system!\n");
+ pr_alert("The 32-bit dump tool cannot be used for a "
+ "64-bit system\n");
rc = -EINVAL;
goto fail;
}
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 2f547b840ef0..fe00be3675cd 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -9,6 +9,9 @@
* Arnd Bergmann (arndb@de.ibm.com)
*/
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
@@ -50,9 +53,10 @@ static int blacklist_range(range_action action, unsigned int from_ssid,
{
if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) {
if (msgtrigger)
- printk(KERN_WARNING "cio: Invalid cio_ignore range "
- "0.%x.%04x-0.%x.%04x\n", from_ssid, from,
- to_ssid, to);
+ pr_warning("0.%x.%04x to 0.%x.%04x is not a valid "
+ "range for cio_ignore\n", from_ssid, from,
+ to_ssid, to);
+
return 1;
}
@@ -140,8 +144,8 @@ static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid,
rc = 0;
out:
if (rc && msgtrigger)
- printk(KERN_WARNING "cio: Invalid cio_ignore device '%s'\n",
- str);
+ pr_warning("%s is not a valid device for the cio_ignore "
+ "kernel parameter\n", str);
return rc;
}
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 3ac2c2019f5e..918e6fce2573 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -19,6 +19,8 @@
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
+#define CCW_BUS_ID_SIZE 20
+
/* In Linux 2.4, we had a channel device layer called "chandev"
* that did all sorts of obscure stuff for networking devices.
* This is another driver that serves as a replacement for just
@@ -89,15 +91,23 @@ ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const
gdev = to_ccwgroupdev(dev);
- if (gdev->state != CCWGROUP_OFFLINE)
- return -EINVAL;
-
+ /* Prevent concurrent online/offline processing and ungrouping. */
+ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+ return -EAGAIN;
+ if (gdev->state != CCWGROUP_OFFLINE) {
+ rc = -EINVAL;
+ goto out;
+ }
/* Note that we cannot unregister the device from one of its
* attribute methods, so we have to use this roundabout approach.
*/
rc = device_schedule_callback(dev, ccwgroup_ungroup_callback);
- if (rc)
- count = rc;
+out:
+ if (rc) {
+ /* Release onoff "lock" when ungrouping failed. */
+ atomic_set(&gdev->onoff, 0);
+ return rc;
+ }
return count;
}
@@ -172,7 +182,7 @@ static int __get_next_bus_id(const char **buf, char *bus_id)
len = end - start + 1;
end++;
}
- if (len < BUS_ID_SIZE) {
+ if (len < CCW_BUS_ID_SIZE) {
strlcpy(bus_id, start, len);
rc = 0;
} else
@@ -181,7 +191,7 @@ static int __get_next_bus_id(const char **buf, char *bus_id)
return rc;
}
-static int __is_valid_bus_id(char bus_id[BUS_ID_SIZE])
+static int __is_valid_bus_id(char bus_id[CCW_BUS_ID_SIZE])
{
int cssid, ssid, devno;
@@ -213,7 +223,7 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
{
struct ccwgroup_device *gdev;
int rc, i;
- char tmp_bus_id[BUS_ID_SIZE];
+ char tmp_bus_id[CCW_BUS_ID_SIZE];
const char *curr_buf;
gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 29826fdd47b8..ebab6ea4659b 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -8,6 +8,9 @@
* Arnd Bergmann (arndb@de.ibm.com)
*/
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
@@ -333,6 +336,7 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
struct chp_config_data *data;
struct chp_id chpid;
int num;
+ char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};
CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
if (sei_area->rs != 0)
@@ -343,8 +347,8 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
if (!chp_test_bit(data->map, num))
continue;
chpid.id = num;
- printk(KERN_WARNING "cio: processing configure event %d for "
- "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
+ pr_notice("Processing %s for channel path %x.%02x\n",
+ events[data->op], chpid.cssid, chpid.id);
switch (data->op) {
case 0:
chp_cfg_schedule(chpid, 1);
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index f49f0e502b8d..0a2f2edafc03 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -61,7 +61,7 @@ static void chsc_subchannel_irq(struct subchannel *sch)
}
private->request = NULL;
memcpy(&request->irb, irb, sizeof(*irb));
- stsch(sch->schid, &sch->schib);
+ cio_update_schib(sch);
complete(&request->completion);
put_device(&sch->dev);
}
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 3db2c386546f..8a8df7552969 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -9,6 +9,9 @@
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -104,44 +107,6 @@ cio_get_options (struct subchannel *sch)
return flags;
}
-/*
- * Use tpi to get a pending interrupt, call the interrupt handler and
- * return a pointer to the subchannel structure.
- */
-static int
-cio_tpi(void)
-{
- struct tpi_info *tpi_info;
- struct subchannel *sch;
- struct irb *irb;
- int irq_context;
-
- tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
- if (tpi (NULL) != 1)
- return 0;
- irb = (struct irb *) __LC_IRB;
- /* Store interrupt response block to lowcore. */
- if (tsch (tpi_info->schid, irb) != 0)
- /* Not status pending or not operational. */
- return 1;
- sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
- if (!sch)
- return 1;
- irq_context = in_interrupt();
- if (!irq_context)
- local_bh_disable();
- irq_enter ();
- spin_lock(sch->lock);
- memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
- if (sch->driver && sch->driver->irq)
- sch->driver->irq(sch);
- spin_unlock(sch->lock);
- irq_exit ();
- if (!irq_context)
- _local_bh_enable();
- return 1;
-}
-
static int
cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
{
@@ -152,11 +117,13 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
else
sch->lpm = 0;
- stsch (sch->schid, &sch->schib);
-
CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
"subchannel 0.%x.%04x!\n", sch->schid.ssid,
sch->schid.sch_no);
+
+ if (cio_update_schib(sch))
+ return -ENODEV;
+
sprintf(dbf_text, "no%s", dev_name(&sch->dev));
CIO_TRACE_EVENT(0, dbf_text);
CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
@@ -354,7 +321,8 @@ cio_cancel (struct subchannel *sch)
switch (ccode) {
case 0: /* success */
/* Update information in scsw. */
- stsch (sch->schid, &sch->schib);
+ if (cio_update_schib(sch))
+ return -ENODEV;
return 0;
case 1: /* status pending */
return -EBUSY;
@@ -365,30 +333,70 @@ cio_cancel (struct subchannel *sch)
}
}
+
+static void cio_apply_config(struct subchannel *sch, struct schib *schib)
+{
+ schib->pmcw.intparm = sch->config.intparm;
+ schib->pmcw.mbi = sch->config.mbi;
+ schib->pmcw.isc = sch->config.isc;
+ schib->pmcw.ena = sch->config.ena;
+ schib->pmcw.mme = sch->config.mme;
+ schib->pmcw.mp = sch->config.mp;
+ schib->pmcw.csense = sch->config.csense;
+ schib->pmcw.mbfc = sch->config.mbfc;
+ if (sch->config.mbfc)
+ schib->mba = sch->config.mba;
+}
+
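+/* Check whether the subchannel actually accepted the settings staged in
+ * sch->config; msch can silently fail, so cio_commit_config() retries the
+ * msch a few times until this check succeeds. */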
+static int cio_check_config(struct subchannel *sch, struct schib *schib)
+{
+ return (schib->pmcw.intparm == sch->config.intparm) &&
+ (schib->pmcw.mbi == sch->config.mbi) &&
+ (schib->pmcw.isc == sch->config.isc) &&
+ (schib->pmcw.ena == sch->config.ena) &&
+ (schib->pmcw.mme == sch->config.mme) &&
+ (schib->pmcw.mp == sch->config.mp) &&
+ (schib->pmcw.csense == sch->config.csense) &&
+ (schib->pmcw.mbfc == sch->config.mbfc) &&
+ (!sch->config.mbfc || (schib->mba == sch->config.mba));
+}
+
/*
- * Function: cio_modify
- * Issues a "Modify Subchannel" on the specified subchannel
+ * cio_commit_config - apply configuration to the subchannel
*/
-int
-cio_modify (struct subchannel *sch)
+int cio_commit_config(struct subchannel *sch)
{
- int ccode, retry, ret;
+ struct schib schib;
+ int ccode, retry, ret = 0;
+
+ if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
+ return -ENODEV;
- ret = 0;
for (retry = 0; retry < 5; retry++) {
- ccode = msch_err (sch->schid, &sch->schib);
- if (ccode < 0) /* -EIO if msch gets a program check. */
+ /* copy desired changes to local schib */
+ cio_apply_config(sch, &schib);
+ ccode = msch_err(sch->schid, &schib);
+ if (ccode < 0) /* -EIO if msch gets a program check. */
return ccode;
switch (ccode) {
 		case 0: /* successful */
- return 0;
- case 1: /* status pending */
+ if (stsch(sch->schid, &schib) ||
+ !css_sch_is_valid(&schib))
+ return -ENODEV;
+ if (cio_check_config(sch, &schib)) {
+ /* commit changes from local schib */
+ memcpy(&sch->schib, &schib, sizeof(schib));
+ return 0;
+ }
+ ret = -EAGAIN;
+ break;
+ case 1: /* status pending */
return -EBUSY;
- case 2: /* busy */
- udelay (100); /* allow for recovery */
+ case 2: /* busy */
+ udelay(100); /* allow for recovery */
ret = -EBUSY;
break;
- case 3: /* not operational */
+ case 3: /* not operational */
return -ENODEV;
}
}
@@ -396,6 +404,23 @@ cio_modify (struct subchannel *sch)
}
/**
+ * cio_update_schib - Perform stsch and update schib if subchannel is valid.
+ * @sch: subchannel on which to perform stsch
+ * Return zero on success, -ENODEV otherwise.
+ */
+int cio_update_schib(struct subchannel *sch)
+{
+ struct schib schib;
+
+ if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
+ return -ENODEV;
+
+ memcpy(&sch->schib, &schib, sizeof(schib));
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cio_update_schib);
+
+/**
* cio_enable_subchannel - enable a subchannel.
* @sch: subchannel to be enabled
* @intparm: interruption parameter to set
@@ -403,7 +428,6 @@ cio_modify (struct subchannel *sch)
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
char dbf_txt[15];
- int ccode;
int retry;
int ret;
@@ -412,33 +436,27 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
if (sch_is_pseudo_sch(sch))
return -EINVAL;
- ccode = stsch (sch->schid, &sch->schib);
- if (ccode)
+ if (cio_update_schib(sch))
return -ENODEV;
- for (retry = 5, ret = 0; retry > 0; retry--) {
- sch->schib.pmcw.ena = 1;
- sch->schib.pmcw.isc = sch->isc;
- sch->schib.pmcw.intparm = intparm;
- ret = cio_modify(sch);
- if (ret == -ENODEV)
- break;
- if (ret == -EIO)
+ sch->config.ena = 1;
+ sch->config.isc = sch->isc;
+ sch->config.intparm = intparm;
+
+ for (retry = 0; retry < 3; retry++) {
+ ret = cio_commit_config(sch);
+ if (ret == -EIO) {
/*
- * Got a program check in cio_modify. Try without
+ * Got a program check in msch. Try without
* the concurrent sense bit the next time.
*/
- sch->schib.pmcw.csense = 0;
- if (ret == 0) {
- stsch (sch->schid, &sch->schib);
- if (sch->schib.pmcw.ena)
- break;
- }
- if (ret == -EBUSY) {
+ sch->config.csense = 0;
+ } else if (ret == -EBUSY) {
struct irb irb;
if (tsch(sch->schid, &irb) != 0)
break;
- }
+ } else
+ break;
}
sprintf (dbf_txt, "ret:%d", ret);
CIO_TRACE_EVENT (2, dbf_txt);
@@ -453,8 +471,6 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
int cio_disable_subchannel(struct subchannel *sch)
{
char dbf_txt[15];
- int ccode;
- int retry;
int ret;
CIO_TRACE_EVENT (2, "dissch");
@@ -462,8 +478,7 @@ int cio_disable_subchannel(struct subchannel *sch)
if (sch_is_pseudo_sch(sch))
return 0;
- ccode = stsch (sch->schid, &sch->schib);
- if (ccode == 3) /* Not operational. */
+ if (cio_update_schib(sch))
return -ENODEV;
if (scsw_actl(&sch->schib.scsw) != 0)
@@ -473,24 +488,9 @@ int cio_disable_subchannel(struct subchannel *sch)
*/
return -EBUSY;
- for (retry = 5, ret = 0; retry > 0; retry--) {
- sch->schib.pmcw.ena = 0;
- ret = cio_modify(sch);
- if (ret == -ENODEV)
- break;
- if (ret == -EBUSY)
- /*
- * The subchannel is busy or status pending.
- * We'll disable when the next interrupt was delivered
- * via the state machine.
- */
- break;
- if (ret == 0) {
- stsch (sch->schid, &sch->schib);
- if (!sch->schib.pmcw.ena)
- break;
- }
- }
+ sch->config.ena = 0;
+ ret = cio_commit_config(sch);
+
sprintf (dbf_txt, "ret:%d", ret);
CIO_TRACE_EVENT (2, dbf_txt);
return ret;
@@ -687,6 +687,43 @@ static char console_sch_name[10] = "0.x.xxxx";
static struct io_subchannel_private console_priv;
static int console_subchannel_in_use;
+/*
+ * Use tpi to get a pending interrupt, call the interrupt handler and
+ * return a pointer to the subchannel structure.
+ */
+static int cio_tpi(void)
+{
+ struct tpi_info *tpi_info;
+ struct subchannel *sch;
+ struct irb *irb;
+ int irq_context;
+
+ tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
+ if (tpi(NULL) != 1)
+ return 0;
+ irb = (struct irb *) __LC_IRB;
+ /* Store interrupt response block to lowcore. */
+ if (tsch(tpi_info->schid, irb) != 0)
+ /* Not status pending or not operational. */
+ return 1;
+ sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
+ if (!sch)
+ return 1;
+ irq_context = in_interrupt();
+ if (!irq_context)
+ local_bh_disable();
+ irq_enter();
+ spin_lock(sch->lock);
+ memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
+ if (sch->driver && sch->driver->irq)
+ sch->driver->irq(sch);
+ spin_unlock(sch->lock);
+ irq_exit();
+ if (!irq_context)
+ _local_bh_enable();
+ return 1;
+}
+
void *cio_get_console_priv(void)
{
return &console_priv;
@@ -780,7 +817,7 @@ cio_probe_console(void)
sch_no = cio_get_console_sch_no();
if (sch_no == -1) {
console_subchannel_in_use = 0;
- printk(KERN_WARNING "cio: No ccw console found!\n");
+ pr_warning("No CCW console was found\n");
return ERR_PTR(-ENODEV);
}
memset(&console_subchannel, 0, sizeof(struct subchannel));
@@ -796,10 +833,9 @@ cio_probe_console(void)
* enable console I/O-interrupt subclass
*/
isc_register(CONSOLE_ISC);
- console_subchannel.schib.pmcw.isc = CONSOLE_ISC;
- console_subchannel.schib.pmcw.intparm =
- (u32)(addr_t)&console_subchannel;
- ret = cio_modify(&console_subchannel);
+ console_subchannel.config.isc = CONSOLE_ISC;
+ console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel;
+ ret = cio_commit_config(&console_subchannel);
if (ret) {
isc_unregister(CONSOLE_ISC);
console_subchannel_in_use = 0;
@@ -811,8 +847,8 @@ cio_probe_console(void)
void
cio_release_console(void)
{
- console_subchannel.schib.pmcw.intparm = 0;
- cio_modify(&console_subchannel);
+ console_subchannel.config.intparm = 0;
+ cio_commit_config(&console_subchannel);
isc_unregister(CONSOLE_ISC);
console_subchannel_in_use = 0;
}
@@ -852,7 +888,8 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
cc = msch(schid, schib);
if (cc)
return (cc==3?-ENODEV:-EBUSY);
- stsch(schid, schib);
+ if (stsch(schid, schib) || !css_sch_is_valid(schib))
+ return -ENODEV;
if (!schib->pmcw.ena)
return 0;
}
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 0fb24784e925..5150fba742ac 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -45,6 +45,19 @@ struct pmcw {
/* ... in an operand exception. */
} __attribute__ ((packed));
+/* Target SCHIB configuration. */
+struct schib_config {
+ u64 mba;
+ u32 intparm;
+ u16 mbi;
+ u32 isc:3;
+ u32 ena:1;
+ u32 mme:2;
+ u32 mp:1;
+ u32 csense:1;
+ u32 mbfc:1;
+} __attribute__ ((packed));
+
/*
* subchannel information block
*/
@@ -82,6 +95,8 @@ struct subchannel {
struct device dev; /* entry in device tree */
struct css_driver *driver;
void *private; /* private per subchannel type data */
+ struct work_struct work;
+ struct schib_config config;
} __attribute__ ((aligned(8)));
#define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */
@@ -100,7 +115,8 @@ extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8);
extern int cio_cancel (struct subchannel *);
extern int cio_set_options (struct subchannel *, int);
extern int cio_get_options (struct subchannel *);
-extern int cio_modify (struct subchannel *);
+extern int cio_update_schib(struct subchannel *sch);
+extern int cio_commit_config(struct subchannel *sch);
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
int cio_tm_intrg(struct subchannel *sch);
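The cio_update_schib()/cio_commit_config() pair declared above replaces the open-coded stsch/msch retry loops: drivers record the settings they want in sch->config and a single commit routine applies and verifies them. A minimal standalone sketch of that pattern (plain user-space C; the struct names and apply_to_hw() stand-in are invented for illustration and are not the kernel API):
#include <stdio.h>
struct cfg {
	unsigned int intparm;
	unsigned int isc:3;
	unsigned int ena:1;
};
struct subch {
	struct cfg config;	/* what the driver wants */
	struct cfg hw;		/* what the device currently has */
};
static void apply_to_hw(struct subch *sch)	/* stand-in for msch() */
{
	sch->hw = sch->config;
}
static int commit_config(struct subch *sch)	/* stand-in for cio_commit_config() */
{
	apply_to_hw(sch);
	/* verify that the device accepted the new settings */
	if (sch->hw.intparm != sch->config.intparm ||
	    sch->hw.isc != sch->config.isc ||
	    sch->hw.ena != sch->config.ena)
		return -1;
	return 0;
}
int main(void)
{
	struct subch sch = { { 0 }, { 0 } };
	sch.config.intparm = 0xcafe;	/* made-up values */
	sch.config.ena = 1;
	printf("commit_config: %d (ena=%u, intparm=%#x)\n",
	       commit_config(&sch), sch.hw.ena, sch.hw.intparm);
	return 0;
}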
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index a90b28c0be57..dc98b2c63862 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -25,6 +25,9 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/bootmem.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -185,56 +188,19 @@ static inline void cmf_activate(void *area, unsigned int onoff)
static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
unsigned long address)
{
- int ret;
- int retry;
struct subchannel *sch;
- struct schib *schib;
sch = to_subchannel(cdev->dev.parent);
- schib = &sch->schib;
- /* msch can silently fail, so do it again if necessary */
- for (retry = 0; retry < 3; retry++) {
- /* prepare schib */
- stsch(sch->schid, schib);
- schib->pmcw.mme = mme;
- schib->pmcw.mbfc = mbfc;
- /* address can be either a block address or a block index */
- if (mbfc)
- schib->mba = address;
- else
- schib->pmcw.mbi = address;
-
- /* try to submit it */
- switch(ret = msch_err(sch->schid, schib)) {
- case 0:
- break;
- case 1:
- case 2: /* in I/O or status pending */
- ret = -EBUSY;
- break;
- case 3: /* subchannel is no longer valid */
- ret = -ENODEV;
- break;
- default: /* msch caught an exception */
- ret = -EINVAL;
- break;
- }
- stsch(sch->schid, schib); /* restore the schib */
-
- if (ret)
- break;
- /* check if it worked */
- if (schib->pmcw.mme == mme &&
- schib->pmcw.mbfc == mbfc &&
- (mbfc ? (schib->mba == address)
- : (schib->pmcw.mbi == address)))
- return 0;
+ sch->config.mme = mme;
+ sch->config.mbfc = mbfc;
+ /* address can be either a block address or a block index */
+ if (mbfc)
+ sch->config.mba = address;
+ else
+ sch->config.mbi = address;
- ret = -EINVAL;
- }
-
- return ret;
+ return cio_commit_config(sch);
}
struct set_schib_struct {
@@ -338,7 +304,7 @@ static int cmf_copy_block(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
- if (stsch(sch->schid, &sch->schib))
+ if (cio_update_schib(sch))
return -ENODEV;
if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
@@ -1359,9 +1325,8 @@ static int __init init_cmf(void)
default:
return 1;
}
-
- printk(KERN_INFO "cio: Channel measurement facility using %s "
- "format (%s)\n", format_string, detect_string);
+ pr_info("Channel measurement facility initialized using format "
+ "%s (mode %s)\n", format_string, detect_string);
return 0;
}
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 76bbb1e74c29..8019288bc6de 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -6,6 +6,10 @@
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
*/
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
@@ -128,8 +132,8 @@ css_free_subchannel(struct subchannel *sch)
{
if (sch) {
/* Reset intparm to zeroes. */
- sch->schib.pmcw.intparm = 0;
- cio_modify(sch);
+ sch->config.intparm = 0;
+ cio_commit_config(sch);
kfree(sch->lock);
kfree(sch);
}
@@ -844,8 +848,8 @@ out:
s390_unregister_crw_handler(CRW_RSC_CSS);
chsc_free_sei_area();
kfree(slow_subchannel_set);
- printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n",
- ret);
+ pr_alert("The CSS device driver initialization failed with "
+ "errno=%d\n", ret);
return ret;
}
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 4e4008325e28..23d5752349b5 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -376,19 +376,23 @@ int ccw_device_set_offline(struct ccw_device *cdev)
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}
spin_unlock_irq(cdev->ccwlock);
+ /* Give up reference from ccw_device_set_online(). */
+ put_device(&cdev->dev);
return ret;
}
spin_unlock_irq(cdev->ccwlock);
- if (ret == 0)
+ if (ret == 0) {
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
- else {
+ /* Give up reference from ccw_device_set_online(). */
+ put_device(&cdev->dev);
+ } else {
CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
"device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
cdev->online = 1;
}
- return ret;
+ return ret;
}
/**
@@ -411,6 +415,9 @@ int ccw_device_set_online(struct ccw_device *cdev)
return -ENODEV;
if (cdev->online || !cdev->drv)
return -EINVAL;
+ /* Hold on to an extra reference while device is online. */
+ if (!get_device(&cdev->dev))
+ return -ENODEV;
spin_lock_irq(cdev->ccwlock);
ret = ccw_device_online(cdev);
@@ -422,10 +429,15 @@ int ccw_device_set_online(struct ccw_device *cdev)
"device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
+ /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
return ret;
}
- if (cdev->private->state != DEV_STATE_ONLINE)
+ if (cdev->private->state != DEV_STATE_ONLINE) {
+ /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
return -ENODEV;
+ }
if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) {
cdev->online = 1;
return 0;
@@ -440,6 +452,8 @@ int ccw_device_set_online(struct ccw_device *cdev)
"device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
+ /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
return (ret == 0) ? -ENODEV : ret;
}
@@ -704,6 +718,8 @@ ccw_device_release(struct device *dev)
struct ccw_device *cdev;
cdev = to_ccwdev(dev);
+ /* Release reference of parent subchannel. */
+ put_device(cdev->dev.parent);
kfree(cdev->private);
kfree(cdev);
}
@@ -735,8 +751,8 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
/* Do first half of device_register. */
device_initialize(&cdev->dev);
if (!get_device(&sch->dev)) {
- if (cdev->dev.release)
- cdev->dev.release(&cdev->dev);
+ /* Release reference from device_initialize(). */
+ put_device(&cdev->dev);
return -ENODEV;
}
return 0;
@@ -778,37 +794,55 @@ static void sch_attach_disconnected_device(struct subchannel *sch,
struct subchannel *other_sch;
int ret;
- other_sch = to_subchannel(get_device(cdev->dev.parent));
+ /* Get reference for new parent. */
+ if (!get_device(&sch->dev))
+ return;
+ other_sch = to_subchannel(cdev->dev.parent);
+ /* Note: device_move() changes cdev->dev.parent */
ret = device_move(&cdev->dev, &sch->dev);
if (ret) {
CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed "
"(ret=%d)!\n", cdev->private->dev_id.ssid,
cdev->private->dev_id.devno, ret);
- put_device(&other_sch->dev);
+ /* Put reference for new parent. */
+ put_device(&sch->dev);
return;
}
sch_set_cdev(other_sch, NULL);
/* No need to keep a subchannel without ccw device around. */
css_sch_device_unregister(other_sch);
- put_device(&other_sch->dev);
sch_attach_device(sch, cdev);
+ /* Put reference for old parent. */
+ put_device(&other_sch->dev);
}
static void sch_attach_orphaned_device(struct subchannel *sch,
struct ccw_device *cdev)
{
int ret;
+ struct subchannel *pseudo_sch;
- /* Try to move the ccw device to its new subchannel. */
+ /* Get reference for new parent. */
+ if (!get_device(&sch->dev))
+ return;
+ pseudo_sch = to_subchannel(cdev->dev.parent);
+ /*
+ * Try to move the ccw device to its new subchannel.
+ * Note: device_move() changes cdev->dev.parent
+ */
ret = device_move(&cdev->dev, &sch->dev);
if (ret) {
CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
"failed (ret=%d)!\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno, ret);
+ /* Put reference for new parent. */
+ put_device(&sch->dev);
return;
}
sch_attach_device(sch, cdev);
+ /* Put reference on pseudo subchannel. */
+ put_device(&pseudo_sch->dev);
}
static void sch_create_and_recog_new_device(struct subchannel *sch)
@@ -830,9 +864,11 @@ static void sch_create_and_recog_new_device(struct subchannel *sch)
spin_lock_irq(sch->lock);
sch_set_cdev(sch, NULL);
spin_unlock_irq(sch->lock);
- if (cdev->dev.release)
- cdev->dev.release(&cdev->dev);
css_sch_device_unregister(sch);
+ /* Put reference from io_subchannel_create_ccwdev(). */
+ put_device(&sch->dev);
+ /* Give up initial reference. */
+ put_device(&cdev->dev);
}
}
@@ -854,15 +890,20 @@ void ccw_device_move_to_orphanage(struct work_struct *work)
dev_id.devno = sch->schib.pmcw.dev;
dev_id.ssid = sch->schid.ssid;
+ /* Increase refcount for pseudo subchannel. */
+ get_device(&css->pseudo_subchannel->dev);
/*
* Move the orphaned ccw device to the orphanage so the replacing
* ccw device can take its place on the subchannel.
+ * Note: device_move() changes cdev->dev.parent
*/
ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev);
if (ret) {
CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
"(ret=%d)!\n", cdev->private->dev_id.ssid,
cdev->private->dev_id.devno, ret);
+ /* Decrease refcount for pseudo subchannel again. */
+ put_device(&css->pseudo_subchannel->dev);
return;
}
cdev->ccwlock = css->pseudo_subchannel->lock;
@@ -875,17 +916,23 @@ void ccw_device_move_to_orphanage(struct work_struct *work)
if (replacing_cdev) {
sch_attach_disconnected_device(sch, replacing_cdev);
/* Release reference from get_disc_ccwdev_by_dev_id() */
- put_device(&cdev->dev);
+ put_device(&replacing_cdev->dev);
+ /* Release reference of subchannel from old cdev. */
+ put_device(&sch->dev);
return;
}
replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
if (replacing_cdev) {
sch_attach_orphaned_device(sch, replacing_cdev);
/* Release reference from get_orphaned_ccwdev_by_dev_id() */
- put_device(&cdev->dev);
+ put_device(&replacing_cdev->dev);
+ /* Release reference of subchannel from old cdev. */
+ put_device(&sch->dev);
return;
}
sch_create_and_recog_new_device(sch);
+ /* Release reference of subchannel from old cdev. */
+ put_device(&sch->dev);
}
/*
@@ -903,6 +950,14 @@ io_subchannel_register(struct work_struct *work)
priv = container_of(work, struct ccw_device_private, kick_work);
cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
+ /*
+ * Check if subchannel is still registered. It may have become
+ * unregistered if a machine check hit us after finishing
+ * device recognition but before the register work could be
+ * queued.
+ */
+ if (!device_is_registered(&sch->dev))
+ goto out_err;
css_update_ssd_info(sch);
/*
* io_subchannel_register() will also be called after device
@@ -910,7 +965,7 @@ io_subchannel_register(struct work_struct *work)
* be registered). We need to reprobe since we may now have sense id
* information.
*/
- if (klist_node_attached(&cdev->dev.knode_parent)) {
+ if (device_is_registered(&cdev->dev)) {
if (!cdev->drv) {
ret = device_reprobe(&cdev->dev);
if (ret)
@@ -934,22 +989,19 @@ io_subchannel_register(struct work_struct *work)
CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno, ret);
- put_device(&cdev->dev);
spin_lock_irqsave(sch->lock, flags);
sch_set_cdev(sch, NULL);
spin_unlock_irqrestore(sch->lock, flags);
- kfree (cdev->private);
- kfree (cdev);
- put_device(&sch->dev);
- if (atomic_dec_and_test(&ccw_device_init_count))
- wake_up(&ccw_device_init_wq);
- return;
+ /* Release initial device reference. */
+ put_device(&cdev->dev);
+ goto out_err;
}
- put_device(&cdev->dev);
out:
cdev->private->flags.recog_done = 1;
- put_device(&sch->dev);
wake_up(&cdev->private->wait_q);
+out_err:
+ /* Release reference for workqueue processing. */
+ put_device(&cdev->dev);
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
}
@@ -968,8 +1020,8 @@ static void ccw_device_call_sch_unregister(struct work_struct *work)
sch = to_subchannel(cdev->dev.parent);
css_sch_device_unregister(sch);
/* Reset intparm to zeroes. */
- sch->schib.pmcw.intparm = 0;
- cio_modify(sch);
+ sch->config.intparm = 0;
+ cio_commit_config(sch);
/* Release cdev reference for workqueue processing.*/
put_device(&cdev->dev);
/* Release subchannel reference for local processing. */
@@ -998,8 +1050,6 @@ io_subchannel_recog_done(struct ccw_device *cdev)
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_call_sch_unregister);
queue_work(slow_path_wq, &cdev->private->kick_work);
- /* Release subchannel reference for asynchronous recognition. */
- put_device(&sch->dev);
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
break;
@@ -1070,10 +1120,15 @@ static void ccw_device_move_to_sch(struct work_struct *work)
priv = container_of(work, struct ccw_device_private, kick_work);
sch = priv->sch;
cdev = priv->cdev;
- former_parent = ccw_device_is_orphan(cdev) ?
- NULL : to_subchannel(get_device(cdev->dev.parent));
+ former_parent = to_subchannel(cdev->dev.parent);
+ /* Get reference for new parent. */
+ if (!get_device(&sch->dev))
+ return;
mutex_lock(&sch->reg_mutex);
- /* Try to move the ccw device to its new subchannel. */
+ /*
+ * Try to move the ccw device to its new subchannel.
+ * Note: device_move() changes cdev->dev.parent
+ */
rc = device_move(&cdev->dev, &sch->dev);
mutex_unlock(&sch->reg_mutex);
if (rc) {
@@ -1083,21 +1138,23 @@ static void ccw_device_move_to_sch(struct work_struct *work)
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no, rc);
css_sch_device_unregister(sch);
+ /* Put reference for new parent again. */
+ put_device(&sch->dev);
goto out;
}
- if (former_parent) {
+ if (!sch_is_pseudo_sch(former_parent)) {
spin_lock_irq(former_parent->lock);
sch_set_cdev(former_parent, NULL);
spin_unlock_irq(former_parent->lock);
css_sch_device_unregister(former_parent);
/* Reset intparm to zeroes. */
- former_parent->schib.pmcw.intparm = 0;
- cio_modify(former_parent);
+ former_parent->config.intparm = 0;
+ cio_commit_config(former_parent);
}
sch_attach_device(sch, cdev);
out:
- if (former_parent)
- put_device(&former_parent->dev);
+ /* Put reference for old parent. */
+ put_device(&former_parent->dev);
put_device(&cdev->dev);
}
@@ -1113,6 +1170,15 @@ static void io_subchannel_irq(struct subchannel *sch)
dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
}
+void io_subchannel_init_config(struct subchannel *sch)
+{
+ memset(&sch->config, 0, sizeof(sch->config));
+ sch->config.csense = 1;
+ /* Use subchannel mp mode when there is more than 1 installed CHPID. */
+ if ((sch->schib.pmcw.pim & (sch->schib.pmcw.pim - 1)) != 0)
+ sch->config.mp = 1;
+}
+
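The multipath test above relies on a small bit trick: pim is a bit mask of installed CHPIDs, and pim & (pim - 1) clears the lowest set bit, so the result is non-zero exactly when more than one path is installed. A standalone sketch (user-space C with made-up PIM values):
#include <stdio.h>
static int more_than_one_path(unsigned char pim)
{
	/* clearing the lowest set bit leaves a remainder only if a second bit is set */
	return (pim & (pim - 1)) != 0;
}
int main(void)
{
	unsigned char masks[] = { 0x00, 0x80, 0x88, 0xff };	/* made-up PIM values */
	unsigned int i;
	for (i = 0; i < sizeof(masks); i++)
		printf("pim=0x%02x -> mp=%d\n", masks[i], more_than_one_path(masks[i]));
	return 0;
}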
static void io_subchannel_init_fields(struct subchannel *sch)
{
if (cio_is_console(sch->schid))
@@ -1127,18 +1193,34 @@ static void io_subchannel_init_fields(struct subchannel *sch)
sch->schib.pmcw.dev, sch->schid.ssid,
sch->schid.sch_no, sch->schib.pmcw.pim,
sch->schib.pmcw.pam, sch->schib.pmcw.pom);
- /* Initially set up some fields in the pmcw. */
- sch->schib.pmcw.ena = 0;
- sch->schib.pmcw.csense = 1; /* concurrent sense */
- if ((sch->lpm & (sch->lpm - 1)) != 0)
- sch->schib.pmcw.mp = 1; /* multipath mode */
- /* clean up possible residual cmf stuff */
- sch->schib.pmcw.mme = 0;
- sch->schib.pmcw.mbfc = 0;
- sch->schib.pmcw.mbi = 0;
- sch->schib.mba = 0;
+
+ io_subchannel_init_config(sch);
}
+static void io_subchannel_do_unreg(struct work_struct *work)
+{
+ struct subchannel *sch;
+
+ sch = container_of(work, struct subchannel, work);
+ css_sch_device_unregister(sch);
+ /* Reset intparm to zeroes. */
+ sch->config.intparm = 0;
+ cio_commit_config(sch);
+ put_device(&sch->dev);
+}
+
+/* Schedule unregister if we have no cdev. */
+static void io_subchannel_schedule_removal(struct subchannel *sch)
+{
+ get_device(&sch->dev);
+ INIT_WORK(&sch->work, io_subchannel_do_unreg);
+ queue_work(slow_path_wq, &sch->work);
+}
+
+/*
+ * Note: We always return 0 so that we bind to the device even on error.
+ * This is needed so that our remove function is called on unregister.
+ */
static int io_subchannel_probe(struct subchannel *sch)
{
struct ccw_device *cdev;
@@ -1168,9 +1250,8 @@ static int io_subchannel_probe(struct subchannel *sch)
ccw_device_register(cdev);
/*
* Check if the device is already online. If it is
- * the reference count needs to be corrected
- * (see ccw_device_online and css_init_done for the
- * ugly details).
+ * the reference count needs to be corrected since we
+ * didn't obtain a reference in ccw_device_set_online.
*/
if (cdev->private->state != DEV_STATE_NOT_OPER &&
cdev->private->state != DEV_STATE_OFFLINE &&
@@ -1179,23 +1260,24 @@ static int io_subchannel_probe(struct subchannel *sch)
return 0;
}
io_subchannel_init_fields(sch);
- /*
- * First check if a fitting device may be found amongst the
- * disconnected devices or in the orphanage.
- */
- dev_id.devno = sch->schib.pmcw.dev;
- dev_id.ssid = sch->schid.ssid;
+ rc = cio_commit_config(sch);
+ if (rc)
+ goto out_schedule;
rc = sysfs_create_group(&sch->dev.kobj,
&io_subchannel_attr_group);
if (rc)
- return rc;
+ goto out_schedule;
/* Allocate I/O subchannel private data. */
sch->private = kzalloc(sizeof(struct io_subchannel_private),
GFP_KERNEL | GFP_DMA);
- if (!sch->private) {
- rc = -ENOMEM;
+ if (!sch->private)
goto out_err;
- }
+ /*
+ * First check if a fitting device may be found amongst the
+ * disconnected devices or in the orphanage.
+ */
+ dev_id.devno = sch->schib.pmcw.dev;
+ dev_id.ssid = sch->schid.ssid;
cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
if (!cdev)
cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
@@ -1213,24 +1295,21 @@ static int io_subchannel_probe(struct subchannel *sch)
return 0;
}
cdev = io_subchannel_create_ccwdev(sch);
- if (IS_ERR(cdev)) {
- rc = PTR_ERR(cdev);
+ if (IS_ERR(cdev))
goto out_err;
- }
rc = io_subchannel_recog(cdev, sch);
if (rc) {
spin_lock_irqsave(sch->lock, flags);
- sch_set_cdev(sch, NULL);
+ io_subchannel_recog_done(cdev);
spin_unlock_irqrestore(sch->lock, flags);
- if (cdev->dev.release)
- cdev->dev.release(&cdev->dev);
- goto out_err;
}
return 0;
out_err:
kfree(sch->private);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
- return rc;
+out_schedule:
+ io_subchannel_schedule_removal(sch);
+ return 0;
}
static int
@@ -1275,10 +1354,7 @@ static void io_subchannel_verify(struct subchannel *sch)
static int check_for_io_on_path(struct subchannel *sch, int mask)
{
- int cc;
-
- cc = stsch(sch->schid, &sch->schib);
- if (cc)
+ if (cio_update_schib(sch))
return 0;
if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask)
return 1;
@@ -1347,15 +1423,13 @@ static int io_subchannel_chp_event(struct subchannel *sch,
io_subchannel_verify(sch);
break;
case CHP_OFFLINE:
- if (stsch(sch->schid, &sch->schib))
- return -ENXIO;
- if (!css_sch_is_valid(&sch->schib))
+ if (cio_update_schib(sch))
return -ENODEV;
io_subchannel_terminate_path(sch, mask);
break;
case CHP_ONLINE:
- if (stsch(sch->schid, &sch->schib))
- return -ENXIO;
+ if (cio_update_schib(sch))
+ return -ENODEV;
sch->lpm |= mask & sch->opm;
io_subchannel_verify(sch);
break;
@@ -1610,8 +1684,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow)
spin_lock_irqsave(sch->lock, flags);
/* Reset intparm to zeroes. */
- sch->schib.pmcw.intparm = 0;
- cio_modify(sch);
+ sch->config.intparm = 0;
+ cio_commit_config(sch);
break;
case REPROBE:
ccw_device_trigger_reprobe(cdev);
@@ -1652,6 +1726,9 @@ static int ccw_device_console_enable(struct ccw_device *cdev,
sch->private = cio_get_console_priv();
memset(sch->private, 0, sizeof(struct io_subchannel_private));
io_subchannel_init_fields(sch);
+ rc = cio_commit_config(sch);
+ if (rc)
+ return rc;
sch->driver = &io_subchannel_driver;
/* Initialize the ccw_device structure. */
cdev->dev.parent= &sch->dev;
@@ -1723,7 +1800,7 @@ __ccwdev_check_busid(struct device *dev, void *id)
bus_id = id;
- return (strncmp(bus_id, dev_name(dev), BUS_ID_SIZE) == 0);
+ return (strcmp(bus_id, dev_name(dev)) == 0);
}
@@ -1806,6 +1883,8 @@ ccw_device_remove (struct device *dev)
"device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
+ /* Give up reference obtained in ccw_device_set_online(). */
+ put_device(&cdev->dev);
}
ccw_device_set_timeout(cdev, 0);
cdev->drv = NULL;
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 104ed669db43..0f2e63ea48de 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -76,6 +76,7 @@ extern wait_queue_head_t ccw_device_init_wq;
extern atomic_t ccw_device_init_count;
void io_subchannel_recog_done(struct ccw_device *cdev);
+void io_subchannel_init_config(struct subchannel *sch);
int ccw_device_cancel_halt_clear(struct ccw_device *);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 10bc03940fb3..8df5eaafc5ab 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -140,8 +140,7 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
int ret;
sch = to_subchannel(cdev->dev.parent);
- ret = stsch(sch->schid, &sch->schib);
- if (ret || !sch->schib.pmcw.dnv)
+ if (cio_update_schib(sch))
return -ENODEV;
if (!sch->schib.pmcw.ena)
/* Not operational -> done. */
@@ -245,11 +244,13 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
* through ssch() and the path information is up to date.
*/
old_lpm = sch->lpm;
- stsch(sch->schid, &sch->schib);
- sch->lpm = sch->schib.pmcw.pam & sch->opm;
+
/* Check since device may again have become not operational. */
- if (!sch->schib.pmcw.dnv)
+ if (cio_update_schib(sch))
state = DEV_STATE_NOT_OPER;
+ else
+ sch->lpm = sch->schib.pmcw.pam & sch->opm;
+
if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
/* Force reprobe on all chpids. */
old_lpm = 0;
@@ -399,9 +400,6 @@ ccw_device_done(struct ccw_device *cdev, int state)
ccw_device_oper_notify(cdev);
}
wake_up(&cdev->private->wait_q);
-
- if (css_init_done && state != DEV_STATE_ONLINE)
- put_device (&cdev->dev);
}
static int cmp_pgid(struct pgid *p1, struct pgid *p2)
@@ -552,7 +550,11 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
sch = to_subchannel(cdev->dev.parent);
/* Update schib - pom may have changed. */
- stsch(sch->schid, &sch->schib);
+ if (cio_update_schib(sch)) {
+ cdev->private->flags.donotify = 0;
+ ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ return;
+ }
/* Update lpm with verified path mask. */
sch->lpm = sch->vpm;
/* Repeat path verification? */
@@ -611,8 +613,6 @@ ccw_device_online(struct ccw_device *cdev)
(cdev->private->state != DEV_STATE_BOXED))
return -EINVAL;
sch = to_subchannel(cdev->dev.parent);
- if (css_init_done && !get_device(&cdev->dev))
- return -ENODEV;
ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
if (ret != 0) {
/* Couldn't enable the subchannel for i/o. Sick device. */
@@ -672,7 +672,7 @@ ccw_device_offline(struct ccw_device *cdev)
return 0;
}
sch = to_subchannel(cdev->dev.parent);
- if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
+ if (cio_update_schib(sch))
return -ENODEV;
if (scsw_actl(&sch->schib.scsw) != 0)
return -EBUSY;
@@ -750,7 +750,10 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
* Since we might not just be coming from an interrupt from the
* subchannel we have to update the schib.
*/
- stsch(sch->schid, &sch->schib);
+ if (cio_update_schib(sch)) {
+ ccw_device_verify_done(cdev, -ENODEV);
+ return;
+ }
if (scsw_actl(&sch->schib.scsw) != 0 ||
(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
@@ -1016,20 +1019,21 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
/* Update some values. */
- if (stsch(sch->schid, &sch->schib))
- return;
- if (!sch->schib.pmcw.dnv)
+ if (cio_update_schib(sch))
return;
/*
* The pim, pam, pom values may not be accurate, but they are the best
* we have before performing device selection :/
*/
sch->lpm = sch->schib.pmcw.pam & sch->opm;
- /* Re-set some bits in the pmcw that were lost. */
- sch->schib.pmcw.csense = 1;
- sch->schib.pmcw.ena = 0;
- if ((sch->lpm & (sch->lpm - 1)) != 0)
- sch->schib.pmcw.mp = 1;
+ /*
+ * Use the initial configuration since we can't be sure that the old
+ * paths are valid.
+ */
+ io_subchannel_init_config(sch);
+ if (cio_commit_config(sch))
+ return;
+
/* We should also update ssd info, but this has to wait. */
/* Check if this is another device which appeared on the same sch. */
if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 86bc94eb607f..fc5ca1dd52b3 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -504,7 +504,7 @@ ccw_device_verify_start(struct ccw_device *cdev)
sch->vpm = 0;
/* Get current pam. */
- if (stsch(sch->schid, &sch->schib)) {
+ if (cio_update_schib(sch)) {
ccw_device_verify_done(cdev, -ENODEV);
return;
}
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 1b03c5423be2..5814dbee2410 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -56,7 +56,8 @@ ccw_device_path_notoper(struct ccw_device *cdev)
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
- stsch (sch->schid, &sch->schib);
+ if (cio_update_schib(sch))
+ goto doverify;
CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
"not operational \n", __func__,
@@ -64,6 +65,7 @@ ccw_device_path_notoper(struct ccw_device *cdev)
sch->schib.pmcw.pnom);
sch->lpm &= ~sch->schib.pmcw.pnom;
+doverify:
cdev->private->flags.doverify = 1;
}
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index e3ea1d5f2810..42f2b09631b6 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -10,10 +10,10 @@
#include <asm/page.h>
#include <asm/schid.h>
+#include <asm/debug.h>
#include "chsc.h"
#define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */
-#define QDIO_BUSY_BIT_GIVE_UP 2000000 /* 2 seconds = eternity */
#define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */
/*
@@ -111,12 +111,12 @@ static inline int do_sqbs(u64 token, unsigned char state, int queue,
}
static inline int do_eqbs(u64 token, unsigned char *state, int queue,
- int *start, int *count)
+ int *start, int *count, int ack)
{
register unsigned long _ccq asm ("0") = *count;
register unsigned long _token asm ("1") = token;
unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
- unsigned long _state = 0;
+ unsigned long _state = (unsigned long)ack << 63;
asm volatile(
" .insn rrf,0xB99c0000,%1,%2,0,0"
@@ -133,7 +133,7 @@ static inline int do_eqbs(u64 token, unsigned char *state, int queue,
static inline int do_sqbs(u64 token, unsigned char state, int queue,
int *start, int *count) { return 0; }
static inline int do_eqbs(u64 token, unsigned char *state, int queue,
- int *start, int *count) { return 0; }
+ int *start, int *count, int ack) { return 0; }
#endif /* CONFIG_64BIT */
struct qdio_irq;
@@ -186,20 +186,14 @@ struct qdio_input_q {
/* input buffer acknowledgement flag */
int polling;
+ /* how many SBALs are acknowledged with qebsm */
+ int ack_count;
+
/* last time of noticing incoming data */
u64 timestamp;
-
- /* lock for clearing the acknowledgement */
- spinlock_t lock;
};
struct qdio_output_q {
- /* failed siga-w attempts*/
- atomic_t busy_siga_counter;
-
- /* start time of busy condition */
- u64 timestamp;
-
/* PCIs are enabled for the queue */
int pci_out_enabled;
@@ -250,6 +244,7 @@ struct qdio_q {
struct qdio_irq *irq_ptr;
struct tasklet_struct tasklet;
+ spinlock_t lock;
/* error condition during a data transfer */
unsigned int qdio_error;
@@ -300,11 +295,13 @@ struct qdio_irq {
struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
+ debug_info_t *debug_area;
struct mutex setup_mutex;
};
/* helper functions */
#define queue_type(q) q->irq_ptr->qib.qfmt
+#define SCH_NO(q) (q->irq_ptr->schid.sch_no)
#define is_thinint_irq(irq) \
(irq->qib.qfmt == QDIO_IQDIO_QFMT || \
@@ -348,10 +345,13 @@ static inline unsigned long long get_usecs(void)
((bufnr + 1) & QDIO_MAX_BUFFERS_MASK)
#define add_buf(bufnr, inc) \
((bufnr + inc) & QDIO_MAX_BUFFERS_MASK)
+#define sub_buf(bufnr, dec) \
+ ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
/* prototypes for thin interrupt */
void qdio_sync_after_thinint(struct qdio_q *q);
-int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state);
+int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state,
+ int auto_ack);
void qdio_check_outbound_after_thinint(struct qdio_q *q);
int qdio_inbound_q_moved(struct qdio_q *q);
void qdio_kick_inbound_handler(struct qdio_q *q);
@@ -378,10 +378,15 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
int nr_output_qs);
void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr);
+int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
+ struct subchannel_id *schid,
+ struct qdio_ssqd_desc *data);
int qdio_setup_irq(struct qdio_initialize *init_data);
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
struct ccw_device *cdev);
void qdio_release_memory(struct qdio_irq *irq_ptr);
+int qdio_setup_create_sysfs(struct ccw_device *cdev);
+void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
int qdio_setup_init(void);
void qdio_setup_exit(void);
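The add_buf()/sub_buf() macros added above implement wrap-around arithmetic on the 128-entry buffer ring: masking with QDIO_MAX_BUFFERS_MASK (127) makes indices wrap modulo 128 in both directions. A standalone sketch with made-up index values:
#include <stdio.h>
#define QDIO_MAX_BUFFERS_MASK	127
#define add_buf(bufnr, inc)	(((bufnr) + (inc)) & QDIO_MAX_BUFFERS_MASK)
#define sub_buf(bufnr, dec)	(((bufnr) - (dec)) & QDIO_MAX_BUFFERS_MASK)
int main(void)
{
	/* made-up index values near the wrap point */
	printf("add_buf(126, 5) = %d\n", add_buf(126, 5));	/* 3 */
	printf("sub_buf(2, 5)   = %d\n", sub_buf(2, 5));	/* 125 */
	return 0;
}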
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index f05590355be8..f8a3b6967f69 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -14,7 +14,7 @@
#include "qdio.h"
debug_info_t *qdio_dbf_setup;
-debug_info_t *qdio_dbf_trace;
+debug_info_t *qdio_dbf_error;
static struct dentry *debugfs_root;
#define MAX_DEBUGFS_QUEUES 32
@@ -22,59 +22,33 @@ static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL };
static DEFINE_MUTEX(debugfs_mutex);
#define QDIO_DEBUGFS_NAME_LEN 40
-void qdio_allocate_do_dbf(struct qdio_initialize *init_data)
+void qdio_allocate_dbf(struct qdio_initialize *init_data,
+ struct qdio_irq *irq_ptr)
{
- char dbf_text[20];
-
- sprintf(dbf_text, "qfmt:%x", init_data->q_format);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
- QDIO_DBF_HEX0(0, setup, init_data->adapter_name, 8);
- sprintf(dbf_text, "qpff%4x", init_data->qib_param_field_format);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
- QDIO_DBF_HEX0(0, setup, &init_data->qib_param_field, sizeof(void *));
- QDIO_DBF_HEX0(0, setup, &init_data->input_slib_elements, sizeof(void *));
- QDIO_DBF_HEX0(0, setup, &init_data->output_slib_elements, sizeof(void *));
- sprintf(dbf_text, "niq:%4x", init_data->no_input_qs);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
- sprintf(dbf_text, "noq:%4x", init_data->no_output_qs);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
- QDIO_DBF_HEX0(0, setup, &init_data->input_handler, sizeof(void *));
- QDIO_DBF_HEX0(0, setup, &init_data->output_handler, sizeof(void *));
- QDIO_DBF_HEX0(0, setup, &init_data->int_parm, sizeof(long));
- QDIO_DBF_HEX0(0, setup, &init_data->flags, sizeof(long));
- QDIO_DBF_HEX0(0, setup, &init_data->input_sbal_addr_array, sizeof(void *));
- QDIO_DBF_HEX0(0, setup, &init_data->output_sbal_addr_array, sizeof(void *));
-}
-
-static void qdio_unregister_dbf_views(void)
-{
- if (qdio_dbf_setup)
- debug_unregister(qdio_dbf_setup);
- if (qdio_dbf_trace)
- debug_unregister(qdio_dbf_trace);
-}
-
-static int qdio_register_dbf_views(void)
-{
- qdio_dbf_setup = debug_register("qdio_setup", QDIO_DBF_SETUP_PAGES,
- QDIO_DBF_SETUP_NR_AREAS,
- QDIO_DBF_SETUP_LEN);
- if (!qdio_dbf_setup)
- goto oom;
- debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
- debug_set_level(qdio_dbf_setup, QDIO_DBF_SETUP_LEVEL);
-
- qdio_dbf_trace = debug_register("qdio_trace", QDIO_DBF_TRACE_PAGES,
- QDIO_DBF_TRACE_NR_AREAS,
- QDIO_DBF_TRACE_LEN);
- if (!qdio_dbf_trace)
- goto oom;
- debug_register_view(qdio_dbf_trace, &debug_hex_ascii_view);
- debug_set_level(qdio_dbf_trace, QDIO_DBF_TRACE_LEVEL);
- return 0;
-oom:
- qdio_unregister_dbf_views();
- return -ENOMEM;
+ char text[20];
+
+ DBF_EVENT("qfmt:%1d", init_data->q_format);
+ DBF_HEX(init_data->adapter_name, 8);
+ DBF_EVENT("qpff%4x", init_data->qib_param_field_format);
+ DBF_HEX(&init_data->qib_param_field, sizeof(void *));
+ DBF_HEX(&init_data->input_slib_elements, sizeof(void *));
+ DBF_HEX(&init_data->output_slib_elements, sizeof(void *));
+ DBF_EVENT("niq:%1d noq:%1d", init_data->no_input_qs,
+ init_data->no_output_qs);
+ DBF_HEX(&init_data->input_handler, sizeof(void *));
+ DBF_HEX(&init_data->output_handler, sizeof(void *));
+ DBF_HEX(&init_data->int_parm, sizeof(long));
+ DBF_HEX(&init_data->flags, sizeof(long));
+ DBF_HEX(&init_data->input_sbal_addr_array, sizeof(void *));
+ DBF_HEX(&init_data->output_sbal_addr_array, sizeof(void *));
+ DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr);
+
+ /* allocate trace view for the interface */
+ snprintf(text, 20, "qdio_%s", dev_name(&init_data->cdev->dev));
+ irq_ptr->debug_area = debug_register(text, 2, 1, 16);
+ debug_register_view(irq_ptr->debug_area, &debug_hex_ascii_view);
+ debug_set_level(irq_ptr->debug_area, DBF_WARN);
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
}
static int qstat_show(struct seq_file *m, void *v)
@@ -86,16 +60,18 @@ static int qstat_show(struct seq_file *m, void *v)
if (!q)
return 0;
- seq_printf(m, "device state indicator: %d\n", *q->irq_ptr->dsci);
+ seq_printf(m, "device state indicator: %d\n", *(u32 *)q->irq_ptr->dsci);
seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used));
seq_printf(m, "ftc: %d\n", q->first_to_check);
seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc);
seq_printf(m, "polling: %d\n", q->u.in.polling);
+ seq_printf(m, "ack count: %d\n", q->u.in.ack_count);
seq_printf(m, "slsb buffer states:\n");
+ seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
qdio_siga_sync_q(q);
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
- get_buf_state(q, i, &state);
+ get_buf_state(q, i, &state, 0);
switch (state) {
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_OUTPUT_NOT_INIT:
@@ -127,6 +103,7 @@ static int qstat_show(struct seq_file *m, void *v)
seq_printf(m, "\n");
}
seq_printf(m, "\n");
+ seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n");
return 0;
}
@@ -223,11 +200,24 @@ void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cd
int __init qdio_debug_init(void)
{
debugfs_root = debugfs_create_dir("qdio_queues", NULL);
- return qdio_register_dbf_views();
+
+ qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16);
+ debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
+ debug_set_level(qdio_dbf_setup, DBF_INFO);
+ DBF_EVENT("dbf created\n");
+
+ qdio_dbf_error = debug_register("qdio_error", 4, 1, 16);
+ debug_register_view(qdio_dbf_error, &debug_hex_ascii_view);
+ debug_set_level(qdio_dbf_error, DBF_INFO);
+ DBF_ERROR("dbf created\n");
+ return 0;
}
void qdio_debug_exit(void)
{
debugfs_remove(debugfs_root);
- qdio_unregister_dbf_views();
+ if (qdio_dbf_setup)
+ debug_unregister(qdio_dbf_setup);
+ if (qdio_dbf_error)
+ debug_unregister(qdio_dbf_error);
}
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index 5a4d85b829ad..5d70bd162ae9 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -12,80 +12,72 @@
#include <asm/qdio.h>
#include "qdio.h"
-#define QDIO_DBF_HEX(ex, name, level, addr, len) \
+/* that gives us 15 characters in the text event views */
+#define QDIO_DBF_LEN 16
+
+extern debug_info_t *qdio_dbf_setup;
+extern debug_info_t *qdio_dbf_error;
+
+/* sort out low debug levels early to avoid wasted snprintf calls */
+static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
+{
+ return (level <= dbf_grp->level);
+}
+
+#define DBF_ERR 3 /* error conditions */
+#define DBF_WARN 4 /* warning conditions */
+#define DBF_INFO 6 /* informational */
+
+#undef DBF_EVENT
+#undef DBF_ERROR
+#undef DBF_DEV_EVENT
+
+#define DBF_EVENT(text...) \
do { \
- if (ex) \
- debug_exception(qdio_dbf_##name, level, (void *)(addr), len); \
- else \
- debug_event(qdio_dbf_##name, level, (void *)(addr), len); \
+ char debug_buffer[QDIO_DBF_LEN]; \
+ snprintf(debug_buffer, QDIO_DBF_LEN, text); \
+ debug_text_event(qdio_dbf_setup, DBF_ERR, debug_buffer); \
} while (0)
-#define QDIO_DBF_TEXT(ex, name, level, text) \
+
+#define DBF_HEX(addr, len) \
do { \
- if (ex) \
- debug_text_exception(qdio_dbf_##name, level, text); \
- else \
- debug_text_event(qdio_dbf_##name, level, text); \
+ debug_event(qdio_dbf_setup, DBF_ERR, (void*)(addr), len); \
} while (0)
-#define QDIO_DBF_HEX0(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 0, addr, len)
-#define QDIO_DBF_HEX1(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 1, addr, len)
-#define QDIO_DBF_HEX2(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 2, addr, len)
-
-#ifdef CONFIG_QDIO_DEBUG
-#define QDIO_DBF_HEX3(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 3, addr, len)
-#define QDIO_DBF_HEX4(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 4, addr, len)
-#define QDIO_DBF_HEX5(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 5, addr, len)
-#define QDIO_DBF_HEX6(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 6, addr, len)
-#else
-#define QDIO_DBF_HEX3(ex, name, addr, len) do {} while (0)
-#define QDIO_DBF_HEX4(ex, name, addr, len) do {} while (0)
-#define QDIO_DBF_HEX5(ex, name, addr, len) do {} while (0)
-#define QDIO_DBF_HEX6(ex, name, addr, len) do {} while (0)
-#endif /* CONFIG_QDIO_DEBUG */
-
-#define QDIO_DBF_TEXT0(ex, name, text) QDIO_DBF_TEXT(ex, name, 0, text)
-#define QDIO_DBF_TEXT1(ex, name, text) QDIO_DBF_TEXT(ex, name, 1, text)
-#define QDIO_DBF_TEXT2(ex, name, text) QDIO_DBF_TEXT(ex, name, 2, text)
-
-#ifdef CONFIG_QDIO_DEBUG
-#define QDIO_DBF_TEXT3(ex, name, text) QDIO_DBF_TEXT(ex, name, 3, text)
-#define QDIO_DBF_TEXT4(ex, name, text) QDIO_DBF_TEXT(ex, name, 4, text)
-#define QDIO_DBF_TEXT5(ex, name, text) QDIO_DBF_TEXT(ex, name, 5, text)
-#define QDIO_DBF_TEXT6(ex, name, text) QDIO_DBF_TEXT(ex, name, 6, text)
-#else
-#define QDIO_DBF_TEXT3(ex, name, text) do {} while (0)
-#define QDIO_DBF_TEXT4(ex, name, text) do {} while (0)
-#define QDIO_DBF_TEXT5(ex, name, text) do {} while (0)
-#define QDIO_DBF_TEXT6(ex, name, text) do {} while (0)
-#endif /* CONFIG_QDIO_DEBUG */
+#define DBF_ERROR(text...) \
+ do { \
+ char debug_buffer[QDIO_DBF_LEN]; \
+ snprintf(debug_buffer, QDIO_DBF_LEN, text); \
+ debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer); \
+ } while (0)
-/* s390dbf views */
-#define QDIO_DBF_SETUP_LEN 8
-#define QDIO_DBF_SETUP_PAGES 8
-#define QDIO_DBF_SETUP_NR_AREAS 1
+#define DBF_ERROR_HEX(addr, len) \
+ do { \
+ debug_event(qdio_dbf_error, DBF_ERR, (void*)(addr), len); \
+ } while (0)
-#define QDIO_DBF_TRACE_LEN 8
-#define QDIO_DBF_TRACE_NR_AREAS 2
-#ifdef CONFIG_QDIO_DEBUG
-#define QDIO_DBF_TRACE_PAGES 32
-#define QDIO_DBF_SETUP_LEVEL 6
-#define QDIO_DBF_TRACE_LEVEL 4
-#else /* !CONFIG_QDIO_DEBUG */
-#define QDIO_DBF_TRACE_PAGES 8
-#define QDIO_DBF_SETUP_LEVEL 2
-#define QDIO_DBF_TRACE_LEVEL 2
-#endif /* CONFIG_QDIO_DEBUG */
+#define DBF_DEV_EVENT(level, device, text...) \
+ do { \
+ char debug_buffer[QDIO_DBF_LEN]; \
+ if (qdio_dbf_passes(device->debug_area, level)) { \
+ snprintf(debug_buffer, QDIO_DBF_LEN, text); \
+ debug_text_event(device->debug_area, level, debug_buffer); \
+ } \
+ } while (0)
-extern debug_info_t *qdio_dbf_setup;
-extern debug_info_t *qdio_dbf_trace;
+#define DBF_DEV_HEX(level, device, addr, len) \
+ do { \
+ debug_event(device->debug_area, level, (void*)(addr), len); \
+ } while (0)
-void qdio_allocate_do_dbf(struct qdio_initialize *init_data);
-void debug_print_bstat(struct qdio_q *q);
+void qdio_allocate_dbf(struct qdio_initialize *init_data,
+ struct qdio_irq *irq_ptr);
void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
struct ccw_device *cdev);
void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr,
struct ccw_device *cdev);
int qdio_debug_init(void);
void qdio_debug_exit(void);
+
#endif
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 7c8659151993..744f928a59ea 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -74,7 +74,7 @@ static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
* Note: For IQDC unicast queues only the highest priority queue is processed.
*/
static inline int do_siga_output(unsigned long schid, unsigned long mask,
- u32 *bb, unsigned int fc)
+ unsigned int *bb, unsigned int fc)
{
register unsigned long __fc asm("0") = fc;
register unsigned long __schid asm("1") = schid;
@@ -95,8 +95,6 @@ static inline int do_siga_output(unsigned long schid, unsigned long mask,
static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
- char dbf_text[15];
-
/* all done or next buffer state different */
if (ccq == 0 || ccq == 32)
return 0;
@@ -104,8 +102,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
if (ccq == 96 || ccq == 97)
return 1;
/* notify devices immediately */
- sprintf(dbf_text, "%d", ccq);
- QDIO_DBF_TEXT2(1, trace, dbf_text);
+ DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
return -EIO;
}
@@ -115,41 +112,45 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
* @state: state of the extracted buffers
* @start: buffer number to start at
* @count: count of buffers to examine
+ * @auto_ack: automatically acknowledge buffers
*
* Returns the number of successfully extracted equal buffer states.
* Stops processing if a state differs from the last buffer's state.
*/
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
- int start, int count)
+ int start, int count, int auto_ack)
{
unsigned int ccq = 0;
int tmp_count = count, tmp_start = start;
int nr = q->nr;
int rc;
- char dbf_text[15];
BUG_ON(!q->irq_ptr->sch_token);
+ qdio_perf_stat_inc(&perf_stats.debug_eqbs_all);
if (!q->is_input_q)
nr += q->irq_ptr->nr_input_qs;
again:
- ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
+ ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
+ auto_ack);
rc = qdio_check_ccq(q, ccq);
/* At least one buffer was processed, return and extract the remaining
* buffers later.
*/
- if ((ccq == 96) && (count != tmp_count))
+ if ((ccq == 96) && (count != tmp_count)) {
+ qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete);
return (count - tmp_count);
+ }
+
if (rc == 1) {
- QDIO_DBF_TEXT5(1, trace, "eqAGAIN");
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
goto again;
}
if (rc < 0) {
- QDIO_DBF_TEXT2(1, trace, "eqberr");
- sprintf(dbf_text, "%2x,%2x,%d,%d", count, tmp_count, ccq, nr);
- QDIO_DBF_TEXT2(1, trace, dbf_text);
+ DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
+ DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
q->handler(q->irq_ptr->cdev,
QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
0, -1, -1, q->irq_ptr->int_parm);
@@ -176,9 +177,12 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
int tmp_count = count, tmp_start = start;
int nr = q->nr;
int rc;
- char dbf_text[15];
+
+ if (!count)
+ return 0;
BUG_ON(!q->irq_ptr->sch_token);
+ qdio_perf_stat_inc(&perf_stats.debug_sqbs_all);
if (!q->is_input_q)
nr += q->irq_ptr->nr_input_qs;
@@ -186,16 +190,13 @@ again:
ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
rc = qdio_check_ccq(q, ccq);
if (rc == 1) {
- QDIO_DBF_TEXT5(1, trace, "sqAGAIN");
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
+ qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete);
goto again;
}
if (rc < 0) {
- QDIO_DBF_TEXT3(1, trace, "sqberr");
- sprintf(dbf_text, "%2x,%2x", count, tmp_count);
- QDIO_DBF_TEXT3(1, trace, dbf_text);
- sprintf(dbf_text, "%d,%d", ccq, nr);
- QDIO_DBF_TEXT3(1, trace, dbf_text);
-
+ DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
+ DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
q->handler(q->irq_ptr->cdev,
QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
0, -1, -1, q->irq_ptr->int_parm);
@@ -207,7 +208,8 @@ again:
/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
- unsigned char *state, unsigned int count)
+ unsigned char *state, unsigned int count,
+ int auto_ack)
{
unsigned char __state = 0;
int i;
@@ -216,7 +218,7 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
if (is_qebsm(q))
- return qdio_do_eqbs(q, state, bufnr, count);
+ return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
for (i = 0; i < count; i++) {
if (!__state)
@@ -230,9 +232,9 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
}
inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
- unsigned char *state)
+ unsigned char *state, int auto_ack)
{
- return get_buf_states(q, bufnr, state, 1);
+ return get_buf_states(q, bufnr, state, 1, auto_ack);
}
/* wrap-around safe setting of slsb states, returns number of changed buffers */
@@ -282,14 +284,12 @@ static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
if (!need_siga_sync(q))
return 0;
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
qdio_perf_stat_inc(&perf_stats.siga_sync);
cc = do_siga_sync(q->irq_ptr->schid, output, input);
- if (cc) {
- QDIO_DBF_TEXT4(0, trace, "sigasync");
- QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
- QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
- }
+ if (cc)
+ DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
return cc;
}
@@ -311,50 +311,37 @@ static inline int qdio_siga_sync_all(struct qdio_q *q)
return qdio_siga_sync(q, ~0U, ~0U);
}
-static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
+static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
- unsigned int fc = 0;
unsigned long schid;
+ unsigned int fc = 0;
+ u64 start_time = 0;
+ int cc;
- if (q->u.out.use_enh_siga) {
+ if (q->u.out.use_enh_siga)
fc = 3;
- }
- if (!is_qebsm(q))
- schid = *((u32 *)&q->irq_ptr->schid);
- else {
+
+ if (is_qebsm(q)) {
schid = q->irq_ptr->sch_token;
fc |= 0x80;
}
- return do_siga_output(schid, q->mask, busy_bit, fc);
-}
-
-static int qdio_siga_output(struct qdio_q *q)
-{
- int cc;
- u32 busy_bit;
- u64 start_time = 0;
- char dbf_text[15];
-
- QDIO_DBF_TEXT5(0, trace, "sigaout");
- QDIO_DBF_HEX5(0, trace, &q, sizeof(void *));
+ else
+ schid = *((u32 *)&q->irq_ptr->schid);
- qdio_perf_stat_inc(&perf_stats.siga_out);
again:
- cc = qdio_do_siga_output(q, &busy_bit);
- if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) {
- sprintf(dbf_text, "bb%4x%2x", q->irq_ptr->schid.sch_no, q->nr);
- QDIO_DBF_TEXT3(0, trace, dbf_text);
+ cc = do_siga_output(schid, q->mask, busy_bit, fc);
- if (!start_time)
+ /* hipersocket busy condition */
+ if (*busy_bit) {
+ WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
+
+ if (!start_time) {
start_time = get_usecs();
- else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+ goto again;
+ }
+ if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
goto again;
}
-
- if (cc == 2 && busy_bit)
- cc |= QDIO_ERROR_SIGA_BUSY;
- if (cc)
- QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
return cc;
}
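The busy-bit handling above keeps reissuing SIGA-w until the device accepts it or QDIO_BUSY_BIT_PATIENCE microseconds have passed since the first busy indication. A standalone sketch of that bounded-retry pattern (user-space C; try_op() is a made-up stand-in for the SIGA instruction):
#include <stdio.h>
#include <time.h>
#define QDIO_BUSY_BIT_PATIENCE 100	/* microseconds */
static unsigned long long get_usecs(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
}
static int try_op(void)
{
	return 1;	/* 1 = busy bit set, 0 = accepted; always busy here */
}
int main(void)
{
	unsigned long long start_time = 0;
	int busy;
	do {
		busy = try_op();
		if (!busy)
			break;
		/* remember when the busy condition was first seen */
		if (!start_time)
			start_time = get_usecs();
	} while (get_usecs() - start_time < QDIO_BUSY_BIT_PATIENCE);
	printf("%s\n", busy ? "still busy, give up" : "accepted");
	return 0;
}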
@@ -362,14 +349,12 @@ static inline int qdio_siga_input(struct qdio_q *q)
{
int cc;
- QDIO_DBF_TEXT4(0, trace, "sigain");
- QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
-
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
qdio_perf_stat_inc(&perf_stats.siga_in);
cc = do_siga_input(q->irq_ptr->schid, q->mask);
if (cc)
- QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
+ DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
return cc;
}
@@ -387,35 +372,91 @@ void qdio_sync_after_thinint(struct qdio_q *q)
inline void qdio_stop_polling(struct qdio_q *q)
{
- spin_lock_bh(&q->u.in.lock);
- if (!q->u.in.polling) {
- spin_unlock_bh(&q->u.in.lock);
+ if (!q->u.in.polling)
return;
- }
+
q->u.in.polling = 0;
qdio_perf_stat_inc(&perf_stats.debug_stop_polling);
/* show the card that we are not polling anymore */
- set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
- spin_unlock_bh(&q->u.in.lock);
+ if (is_qebsm(q)) {
+ set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT,
+ q->u.in.ack_count);
+ q->u.in.ack_count = 0;
+ } else
+ set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
}
-static void announce_buffer_error(struct qdio_q *q)
+static void announce_buffer_error(struct qdio_q *q, int count)
{
- char dbf_text[15];
+ q->qdio_error |= QDIO_ERROR_SLSB_STATE;
+
+ /* special handling for the "no target buffer empty" condition (SBAL element 15 flag 0x10) */
+ if ((!q->is_input_q &&
+ (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
+ qdio_perf_stat_inc(&perf_stats.outbound_target_full);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%3d",
+ q->first_to_check);
+ return;
+ }
- if (q->is_input_q)
- QDIO_DBF_TEXT3(1, trace, "inperr");
- else
- QDIO_DBF_TEXT3(0, trace, "outperr");
+ DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
+ DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
+ DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
+ DBF_ERROR("F14:%2x F15:%2x",
+ q->sbal[q->first_to_check]->element[14].flags & 0xff,
+ q->sbal[q->first_to_check]->element[15].flags & 0xff);
+}
+
+static inline void inbound_primed(struct qdio_q *q, int count)
+{
+ int new;
+
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %3d", count);
+
+ /* for QEBSM the ACK was already set by EQBS */
+ if (is_qebsm(q)) {
+ if (!q->u.in.polling) {
+ q->u.in.polling = 1;
+ q->u.in.ack_count = count;
+ q->last_move_ftc = q->first_to_check;
+ return;
+ }
+
+ /* delete the previous ACKs */
+ set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT,
+ q->u.in.ack_count);
+ q->u.in.ack_count = count;
+ q->last_move_ftc = q->first_to_check;
+ return;
+ }
+
+ /*
+ * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
+ * or by the next inbound run.
+ */
+ new = add_buf(q->first_to_check, count - 1);
+ if (q->u.in.polling) {
+ /* reset the previous ACK but first set the new one */
+ set_buf_state(q, new, SLSB_P_INPUT_ACK);
+ set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
+ }
+ else {
+ q->u.in.polling = 1;
+ set_buf_state(q, q->first_to_check, SLSB_P_INPUT_ACK);
+ }
- sprintf(dbf_text, "%x-%x-%x", q->first_to_check,
- q->sbal[q->first_to_check]->element[14].flags,
- q->sbal[q->first_to_check]->element[15].flags);
- QDIO_DBF_TEXT3(1, trace, dbf_text);
- QDIO_DBF_HEX2(1, trace, q->sbal[q->first_to_check], 256);
+ q->last_move_ftc = new;
+ count--;
+ if (!count)
+ return;
- q->qdio_error = QDIO_ERROR_SLSB_STATE;
+ /*
+ * Need to change all PRIMED buffers to NOT_INIT, otherwise
+ * we're losing initiative in the thinint code.
+ */
+ set_buf_states(q, next_buf(q->first_to_check), SLSB_P_INPUT_NOT_INIT,
+ count);
}
static int get_inbound_buffer_frontier(struct qdio_q *q)
@@ -424,13 +465,6 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
unsigned char state;
/*
- * If we still poll don't update last_move_ftc, keep the
- * previously ACK buffer there.
- */
- if (!q->u.in.polling)
- q->last_move_ftc = q->first_to_check;
-
- /*
* Don't check 128 buffers, as otherwise qdio_inbound_q_moved
* would return 0.
*/
@@ -450,34 +484,13 @@ check_next:
if (q->first_to_check == stop)
goto out;
- count = get_buf_states(q, q->first_to_check, &state, count);
+ count = get_buf_states(q, q->first_to_check, &state, count, 1);
if (!count)
goto out;
switch (state) {
case SLSB_P_INPUT_PRIMED:
- QDIO_DBF_TEXT5(0, trace, "inptprim");
-
- /*
- * Only ACK the first buffer. The ACK will be removed in
- * qdio_stop_polling.
- */
- if (q->u.in.polling)
- state = SLSB_P_INPUT_NOT_INIT;
- else {
- q->u.in.polling = 1;
- state = SLSB_P_INPUT_ACK;
- }
- set_buf_state(q, q->first_to_check, state);
-
- /*
- * Need to change all PRIMED buffers to NOT_INIT, otherwise
- * we're loosing initiative in the thinint code.
- */
- if (count > 1)
- set_buf_states(q, next_buf(q->first_to_check),
- SLSB_P_INPUT_NOT_INIT, count - 1);
-
+ inbound_primed(q, count);
/*
* No siga-sync needed for non-qebsm here, as the inbound queue
* will be synced on the next siga-r, resp.
@@ -487,7 +500,7 @@ check_next:
atomic_sub(count, &q->nr_buf_used);
goto check_next;
case SLSB_P_INPUT_ERROR:
- announce_buffer_error(q);
+ announce_buffer_error(q, count);
/* process the buffer, the upper layer will take care of it */
q->first_to_check = add_buf(q->first_to_check, count);
atomic_sub(count, &q->nr_buf_used);
@@ -495,13 +508,12 @@ check_next:
case SLSB_CU_INPUT_EMPTY:
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_INPUT_ACK:
- QDIO_DBF_TEXT5(0, trace, "inpnipro");
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
break;
default:
BUG();
}
out:
- QDIO_DBF_HEX4(0, trace, &q->first_to_check, sizeof(int));
return q->first_to_check;
}
@@ -515,8 +527,7 @@ int qdio_inbound_q_moved(struct qdio_q *q)
if (!need_siga_sync(q) && !pci_out_supported(q))
q->u.in.timestamp = get_usecs();
- QDIO_DBF_TEXT4(0, trace, "inhasmvd");
- QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
return 1;
} else
return 0;
@@ -524,10 +535,7 @@ int qdio_inbound_q_moved(struct qdio_q *q)
static int qdio_inbound_q_done(struct qdio_q *q)
{
- unsigned char state;
-#ifdef CONFIG_QDIO_DEBUG
- char dbf_text[15];
-#endif
+ unsigned char state = 0;
if (!atomic_read(&q->nr_buf_used))
return 1;
@@ -538,7 +546,7 @@ static int qdio_inbound_q_done(struct qdio_q *q)
*/
qdio_siga_sync_q(q);
- get_buf_state(q, q->first_to_check, &state);
+ get_buf_state(q, q->first_to_check, &state, 0);
if (state == SLSB_P_INPUT_PRIMED)
/* we got something to do */
return 0;
@@ -552,20 +560,12 @@ static int qdio_inbound_q_done(struct qdio_q *q)
* has (probably) not moved (see qdio_inbound_processing).
*/
if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
-#ifdef CONFIG_QDIO_DEBUG
- QDIO_DBF_TEXT4(0, trace, "inqisdon");
- QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
- sprintf(dbf_text, "pf%02x", q->first_to_check);
- QDIO_DBF_TEXT4(0, trace, dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
+ q->first_to_check);
return 1;
} else {
-#ifdef CONFIG_QDIO_DEBUG
- QDIO_DBF_TEXT4(0, trace, "inqisntd");
- QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
- sprintf(dbf_text, "pf%02x", q->first_to_check);
- QDIO_DBF_TEXT4(0, trace, dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
+ q->first_to_check);
return 0;
}
}
@@ -573,9 +573,6 @@ static int qdio_inbound_q_done(struct qdio_q *q)
void qdio_kick_inbound_handler(struct qdio_q *q)
{
int count, start, end;
-#ifdef CONFIG_QDIO_DEBUG
- char dbf_text[15];
-#endif
qdio_perf_stat_inc(&perf_stats.inbound_handler);
@@ -586,10 +583,7 @@ void qdio_kick_inbound_handler(struct qdio_q *q)
else
count = end + QDIO_MAX_BUFFERS_PER_Q - start;
-#ifdef CONFIG_QDIO_DEBUG
- sprintf(dbf_text, "s=%2xc=%2x", start, count);
- QDIO_DBF_TEXT4(0, trace, dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count);
if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return;
@@ -655,14 +649,14 @@ check_next:
if (q->first_to_check == stop)
return q->first_to_check;
- count = get_buf_states(q, q->first_to_check, &state, count);
+ count = get_buf_states(q, q->first_to_check, &state, count, 0);
if (!count)
return q->first_to_check;
switch (state) {
case SLSB_P_OUTPUT_EMPTY:
/* the adapter got it */
- QDIO_DBF_TEXT5(0, trace, "outpempt");
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %3d", q->nr, count);
atomic_sub(count, &q->nr_buf_used);
q->first_to_check = add_buf(q->first_to_check, count);
@@ -674,14 +668,14 @@ check_next:
break;
goto check_next;
case SLSB_P_OUTPUT_ERROR:
- announce_buffer_error(q);
+ announce_buffer_error(q, count);
/* process the buffer, the upper layer will take care of it */
q->first_to_check = add_buf(q->first_to_check, count);
atomic_sub(count, &q->nr_buf_used);
break;
case SLSB_CU_OUTPUT_PRIMED:
/* the adapter has not fetched the output yet */
- QDIO_DBF_TEXT5(0, trace, "outpprim");
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
break;
case SLSB_P_OUTPUT_NOT_INIT:
case SLSB_P_OUTPUT_HALTED:
@@ -706,99 +700,48 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q)
if ((bufnr != q->last_move_ftc) || q->qdio_error) {
q->last_move_ftc = bufnr;
- QDIO_DBF_TEXT4(0, trace, "oqhasmvd");
- QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
return 1;
} else
return 0;
}
-/*
- * VM could present us cc=2 and busy bit set on SIGA-write
- * during reconfiguration of their Guest LAN (only in iqdio mode,
- * otherwise qdio is asynchronous and cc=2 and busy bit there will take
- * the queues down immediately).
- *
- * Therefore qdio_siga_output will try for a short time constantly,
- * if such a condition occurs. If it doesn't change, it will
- * increase the busy_siga_counter and save the timestamp, and
- * schedule the queue for later processing. qdio_outbound_processing
- * will check out the counter. If non-zero, it will call qdio_kick_outbound_q
- * as often as the value of the counter. This will attempt further SIGA
- * instructions. For each successful SIGA, the counter is
- * decreased, for failing SIGAs the counter remains the same, after
- * all. After some time of no movement, qdio_kick_outbound_q will
- * finally fail and reflect corresponding error codes to call
- * the upper layer module and have it take the queues down.
- *
- * Note that this is a change from the original HiperSockets design
- * (saying cc=2 and busy bit means take the queues down), but in
- * these days Guest LAN didn't exist... excessive cc=2 with busy bit
- * conditions will still take the queues down, but the threshold is
- * higher due to the Guest LAN environment.
- *
- * Called from outbound tasklet and do_QDIO handler.
- */
static void qdio_kick_outbound_q(struct qdio_q *q)
{
- int rc;
-#ifdef CONFIG_QDIO_DEBUG
- char dbf_text[15];
-
- QDIO_DBF_TEXT5(0, trace, "kickoutq");
- QDIO_DBF_HEX5(0, trace, &q, sizeof(void *));
-#endif /* CONFIG_QDIO_DEBUG */
+ unsigned int busy_bit;
+ int cc;
if (!need_siga_out(q))
return;
- rc = qdio_siga_output(q);
- switch (rc) {
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
+ qdio_perf_stat_inc(&perf_stats.siga_out);
+
+ cc = qdio_siga_output(q, &busy_bit);
+ switch (cc) {
case 0:
- /* TODO: improve error handling for CC=0 case */
-#ifdef CONFIG_QDIO_DEBUG
- if (q->u.out.timestamp) {
- QDIO_DBF_TEXT3(0, trace, "cc2reslv");
- sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no,
- q->nr,
- atomic_read(&q->u.out.busy_siga_counter));
- QDIO_DBF_TEXT3(0, trace, dbf_text);
- }
-#endif /* CONFIG_QDIO_DEBUG */
- /* went smooth this time, reset timestamp */
- q->u.out.timestamp = 0;
break;
- /* cc=2 and busy bit */
- case (2 | QDIO_ERROR_SIGA_BUSY):
- atomic_inc(&q->u.out.busy_siga_counter);
-
- /* if the last siga was successful, save timestamp here */
- if (!q->u.out.timestamp)
- q->u.out.timestamp = get_usecs();
-
- /* if we're in time, don't touch qdio_error */
- if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP) {
- tasklet_schedule(&q->tasklet);
- break;
+ case 2:
+ if (busy_bit) {
+ DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
+ q->qdio_error = cc | QDIO_ERROR_SIGA_BUSY;
+ } else {
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d",
+ q->nr);
+ q->qdio_error = cc;
}
- QDIO_DBF_TEXT2(0, trace, "cc2REPRT");
-#ifdef CONFIG_QDIO_DEBUG
- sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr,
- atomic_read(&q->u.out.busy_siga_counter));
- QDIO_DBF_TEXT3(0, trace, dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
- default:
- /* for plain cc=1, 2 or 3 */
- q->qdio_error = rc;
+ break;
+ case 1:
+ case 3:
+ DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
+ q->qdio_error = cc;
+ break;
}
}
static void qdio_kick_outbound_handler(struct qdio_q *q)
{
int start, end, count;
-#ifdef CONFIG_QDIO_DEBUG
- char dbf_text[15];
-#endif
start = q->first_to_kick;
end = q->last_move_ftc;
@@ -807,13 +750,8 @@ static void qdio_kick_outbound_handler(struct qdio_q *q)
else
count = end + QDIO_MAX_BUFFERS_PER_Q - start;
-#ifdef CONFIG_QDIO_DEBUG
- QDIO_DBF_TEXT4(0, trace, "kickouth");
- QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
-
- sprintf(dbf_text, "s=%2xc=%2x", start, count);
- QDIO_DBF_TEXT4(0, trace, dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kickouth: %1d", q->nr);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count);
if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return;
@@ -828,22 +766,18 @@ static void qdio_kick_outbound_handler(struct qdio_q *q)
static void __qdio_outbound_processing(struct qdio_q *q)
{
- int siga_attempts;
+ unsigned long flags;
qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
-
- /* see comment in qdio_kick_outbound_q */
- siga_attempts = atomic_read(&q->u.out.busy_siga_counter);
- while (siga_attempts--) {
- atomic_dec(&q->u.out.busy_siga_counter);
- qdio_kick_outbound_q(q);
- }
+ spin_lock_irqsave(&q->lock, flags);
BUG_ON(atomic_read(&q->nr_buf_used) < 0);
if (qdio_outbound_q_moved(q))
qdio_kick_outbound_handler(q);
+ spin_unlock_irqrestore(&q->lock, flags);
+
if (queue_type(q) == QDIO_ZFCP_QFMT) {
if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
tasklet_schedule(&q->tasklet);
@@ -908,27 +842,18 @@ void qdio_check_outbound_after_thinint(struct qdio_q *q)
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
enum qdio_irq_states state)
{
-#ifdef CONFIG_QDIO_DEBUG
- char dbf_text[15];
-
- QDIO_DBF_TEXT5(0, trace, "newstate");
- sprintf(dbf_text, "%4x%4x", irq_ptr->schid.sch_no, state);
- QDIO_DBF_TEXT5(0, trace, dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);
irq_ptr->state = state;
mb();
}
-static void qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
+static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
- char dbf_text[15];
-
if (irb->esw.esw0.erw.cons) {
- sprintf(dbf_text, "sens%4x", schid.sch_no);
- QDIO_DBF_TEXT2(1, trace, dbf_text);
- QDIO_DBF_HEX0(0, trace, irb, 64);
- QDIO_DBF_HEX0(0, trace, irb->ecw, 64);
+ DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
+ DBF_ERROR_HEX(irb, 64);
+ DBF_ERROR_HEX(irb->ecw, 64);
}
}
@@ -962,14 +887,10 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct qdio_q *q;
- char dbf_text[15];
- QDIO_DBF_TEXT2(1, trace, "ick2");
- sprintf(dbf_text, "%s", dev_name(&cdev->dev));
- QDIO_DBF_TEXT2(1, trace, dbf_text);
- QDIO_DBF_HEX2(0, trace, &intparm, sizeof(int));
- QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int));
- QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int));
+ DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
+ DBF_ERROR("intp :%lx", intparm);
+ DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
if (irq_ptr->nr_input_qs) {
q = irq_ptr->input_qs[0];
@@ -1022,28 +943,29 @@ static void qdio_int_error(struct ccw_device *cdev)
}
static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat,
- int dstat)
+ int dstat)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
- QDIO_DBF_TEXT2(1, setup, "eq:ckcon");
+ DBF_ERROR("EQ:ck con");
goto error;
}
if (!(dstat & DEV_STAT_DEV_END)) {
- QDIO_DBF_TEXT2(1, setup, "eq:no de");
+ DBF_ERROR("EQ:no dev");
goto error;
}
if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) {
- QDIO_DBF_TEXT2(1, setup, "eq:badio");
+ DBF_ERROR("EQ: bad io");
goto error;
}
return 0;
error:
- QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int));
- QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int));
+ DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
+ DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
+
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
return 1;
}
@@ -1052,12 +974,8 @@ static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
int dstat)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
- char dbf_text[15];
-
- sprintf(dbf_text, "qehi%4x", cdev->private->schid.sch_no);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
- QDIO_DBF_TEXT0(0, trace, dbf_text);
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
if (!qdio_establish_check_errors(cdev, cstat, dstat))
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
}
@@ -1068,25 +986,21 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
int cstat, dstat;
- char dbf_text[15];
qdio_perf_stat_inc(&perf_stats.qdio_int);
if (!intparm || !irq_ptr) {
- sprintf(dbf_text, "qihd%4x", cdev->private->schid.sch_no);
- QDIO_DBF_TEXT2(1, setup, dbf_text);
+ DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
return;
}
if (IS_ERR(irb)) {
switch (PTR_ERR(irb)) {
case -EIO:
- sprintf(dbf_text, "ierr%4x", irq_ptr->schid.sch_no);
- QDIO_DBF_TEXT2(1, setup, dbf_text);
+ DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
return;
case -ETIMEDOUT:
- sprintf(dbf_text, "qtoh%4x", irq_ptr->schid.sch_no);
- QDIO_DBF_TEXT2(1, setup, dbf_text);
+ DBF_ERROR("%4x IO timeout", irq_ptr->schid.sch_no);
qdio_int_error(cdev);
return;
default:
@@ -1094,7 +1008,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
return;
}
}
- qdio_irq_check_sense(irq_ptr->schid, irb);
+ qdio_irq_check_sense(irq_ptr, irb);
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
@@ -1129,23 +1043,20 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
/**
* qdio_get_ssqd_desc - get qdio subchannel description
* @cdev: ccw device to get description for
+ * @data: where to store the ssqd
*
- * Returns a pointer to the saved qdio subchannel description,
- * or NULL for not setup qdio devices.
+ * Returns 0 or an error code. The results of the chsc are stored in the
+ * specified structure.
*/
-struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev)
+int qdio_get_ssqd_desc(struct ccw_device *cdev,
+ struct qdio_ssqd_desc *data)
{
- struct qdio_irq *irq_ptr;
- char dbf_text[15];
-
- sprintf(dbf_text, "qssq%4x", cdev->private->schid.sch_no);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
- irq_ptr = cdev->private->qdio_data;
- if (!irq_ptr)
- return NULL;
+ if (!cdev || !cdev->private)
+ return -EINVAL;
- return &irq_ptr->ssqd_desc;
+ DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
+ return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
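Note on the hunk above: qdio_get_ssqd_desc() no longer hands out a pointer into qdio-internal data; the caller now supplies a struct qdio_ssqd_desc and checks the return code, with the chsc result copied into the caller's structure. A minimal caller sketch under that assumption (the wrapper name is hypothetical, not part of the patch):

	static int example_query_ssqd(struct ccw_device *cdev)
	{
		struct qdio_ssqd_desc ssqd;
		int rc;

		rc = qdio_get_ssqd_desc(cdev, &ssqd);
		if (rc)
			return rc;	/* -EINVAL or a chsc error code */
		/* inspect the copied description here, e.g. ssqd.mmwc */
		return 0;
	}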
@@ -1159,14 +1070,9 @@ EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
*/
int qdio_cleanup(struct ccw_device *cdev, int how)
{
- struct qdio_irq *irq_ptr;
- char dbf_text[15];
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
int rc;
- sprintf(dbf_text, "qcln%4x", cdev->private->schid.sch_no);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
-
- irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
@@ -1199,18 +1105,15 @@ static void qdio_shutdown_queues(struct ccw_device *cdev)
*/
int qdio_shutdown(struct ccw_device *cdev, int how)
{
- struct qdio_irq *irq_ptr;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
int rc;
unsigned long flags;
- char dbf_text[15];
- sprintf(dbf_text, "qshu%4x", cdev->private->schid.sch_no);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
-
- irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
+ DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
+
mutex_lock(&irq_ptr->setup_mutex);
/*
* Subchannel was already shot down. We cannot prevent being called
@@ -1234,10 +1137,8 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
/* default behaviour is halt */
rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
if (rc) {
- sprintf(dbf_text, "sher%4x", irq_ptr->schid.sch_no);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
- sprintf(dbf_text, "rc=%d", rc);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
+ DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
+ DBF_ERROR("rc:%4d", rc);
goto no_cleanup;
}
@@ -1271,17 +1172,18 @@ EXPORT_SYMBOL_GPL(qdio_shutdown);
*/
int qdio_free(struct ccw_device *cdev)
{
- struct qdio_irq *irq_ptr;
- char dbf_text[15];
-
- sprintf(dbf_text, "qfre%4x", cdev->private->schid.sch_no);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
- irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
+ DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
mutex_lock(&irq_ptr->setup_mutex);
+
+ if (irq_ptr->debug_area != NULL) {
+ debug_unregister(irq_ptr->debug_area);
+ irq_ptr->debug_area = NULL;
+ }
cdev->private->qdio_data = NULL;
mutex_unlock(&irq_ptr->setup_mutex);
@@ -1300,10 +1202,6 @@ EXPORT_SYMBOL_GPL(qdio_free);
int qdio_initialize(struct qdio_initialize *init_data)
{
int rc;
- char dbf_text[15];
-
- sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
rc = qdio_allocate(init_data);
if (rc)
@@ -1323,10 +1221,8 @@ EXPORT_SYMBOL_GPL(qdio_initialize);
int qdio_allocate(struct qdio_initialize *init_data)
{
struct qdio_irq *irq_ptr;
- char dbf_text[15];
- sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
+ DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);
if ((init_data->no_input_qs && !init_data->input_handler) ||
(init_data->no_output_qs && !init_data->output_handler))
@@ -1340,16 +1236,13 @@ int qdio_allocate(struct qdio_initialize *init_data)
(!init_data->output_sbal_addr_array))
return -EINVAL;
- qdio_allocate_do_dbf(init_data);
-
/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!irq_ptr)
goto out_err;
- QDIO_DBF_TEXT0(0, setup, "irq_ptr:");
- QDIO_DBF_HEX0(0, setup, &irq_ptr, sizeof(void *));
mutex_init(&irq_ptr->setup_mutex);
+ qdio_allocate_dbf(init_data, irq_ptr);
/*
* Allocate a page for the chsc calls in qdio_establish.
@@ -1367,9 +1260,6 @@ int qdio_allocate(struct qdio_initialize *init_data)
goto out_rel;
WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
- QDIO_DBF_TEXT0(0, setup, "qdr:");
- QDIO_DBF_HEX0(0, setup, &irq_ptr->qdr, sizeof(void *));
-
if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
init_data->no_output_qs))
goto out_rel;
@@ -1390,14 +1280,12 @@ EXPORT_SYMBOL_GPL(qdio_allocate);
*/
int qdio_establish(struct qdio_initialize *init_data)
{
- char dbf_text[20];
struct qdio_irq *irq_ptr;
struct ccw_device *cdev = init_data->cdev;
unsigned long saveflags;
int rc;
- sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
+ DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
@@ -1427,10 +1315,8 @@ int qdio_establish(struct qdio_initialize *init_data)
rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
if (rc) {
- sprintf(dbf_text, "eq:io%4x", irq_ptr->schid.sch_no);
- QDIO_DBF_TEXT2(1, setup, dbf_text);
- sprintf(dbf_text, "eq:rc%4x", rc);
- QDIO_DBF_TEXT2(1, setup, dbf_text);
+ DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
+ DBF_ERROR("rc:%4x", rc);
}
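The receive callback above (and the matching hunks for zcrypt_pcica, zcrypt_pcicc and zcrypt_pcixcc below) fix the same defect: the reply header pointer was initialized at declaration time, which dereferences 'reply' before the IS_ERR() check, so an error pointer could be dereferenced. A self-contained user-space sketch of the corrected pattern, using simplified stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers (struct and function names here are illustrative only):

	#include <stdio.h>

	#define MAX_ERRNO	4095

	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	struct reply_msg { const char *message; };

	/* Check for an error pointer first, dereference only afterwards. */
	static void receive(struct reply_msg *reply, char *out, size_t len)
	{
		const char *payload;

		if (IS_ERR(reply)) {
			snprintf(out, len, "machine failure");
			return;
		}
		payload = reply->message;
		snprintf(out, len, "%s", payload);
	}

	int main(void)
	{
		char buf[32];
		struct reply_msg ok = { .message = "type80 reply" };

		receive(&ok, buf, sizeof(buf));
		printf("%s\n", buf);			 /* prints the reply        */
		receive(ERR_PTR(-5L), buf, sizeof(buf)); /* -EIO style error ptr    */
		printf("%s\n", buf);			 /* prints machine failure  */
		return 0;
	}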
spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
@@ -1451,10 +1337,8 @@ int qdio_establish(struct qdio_initialize *init_data)
}
qdio_setup_ssqd_info(irq_ptr);
- sprintf(dbf_text, "qDmmwc%2x", irq_ptr->ssqd_desc.mmwc);
- QDIO_DBF_TEXT2(0, setup, dbf_text);
- sprintf(dbf_text, "qib ac%2x", irq_ptr->qib.ac);
- QDIO_DBF_TEXT2(0, setup, dbf_text);
+ DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
+ DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);
/* qebsm is now setup if available, initialize buffer states */
qdio_init_buf_states(irq_ptr);
@@ -1475,10 +1359,8 @@ int qdio_activate(struct ccw_device *cdev)
struct qdio_irq *irq_ptr;
int rc;
unsigned long saveflags;
- char dbf_text[20];
- sprintf(dbf_text, "qact%4x", cdev->private->schid.sch_no);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
+ DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
@@ -1504,10 +1386,8 @@ int qdio_activate(struct ccw_device *cdev)
rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
0, DOIO_DENY_PREFETCH);
if (rc) {
- sprintf(dbf_text, "aq:io%4x", irq_ptr->schid.sch_no);
- QDIO_DBF_TEXT2(1, setup, dbf_text);
- sprintf(dbf_text, "aq:rc%4x", rc);
- QDIO_DBF_TEXT2(1, setup, dbf_text);
+ DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
+ DBF_ERROR("rc:%4x", rc);
}
spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
@@ -1565,23 +1445,38 @@ static inline int buf_in_between(int bufnr, int start, int count)
static void handle_inbound(struct qdio_q *q, unsigned int callflags,
int bufnr, int count)
{
- unsigned long flags;
- int used, rc;
+ int used, cc, diff;
- /*
- * do_QDIO could run in parallel with the queue tasklet so the
- * upper-layer programm could empty the ACK'ed buffer here.
- * If that happens we must clear the polling flag, otherwise
- * qdio_stop_polling() could set the buffer to NOT_INIT after
- * it was set to EMPTY which would kill us.
- */
- spin_lock_irqsave(&q->u.in.lock, flags);
- if (q->u.in.polling)
- if (buf_in_between(q->last_move_ftc, bufnr, count))
+ if (!q->u.in.polling)
+ goto set;
+
+ /* protect against stop polling setting an ACK for an emptied slsb */
+ if (count == QDIO_MAX_BUFFERS_PER_Q) {
+ /* overwriting everything, just delete polling status */
+ q->u.in.polling = 0;
+ q->u.in.ack_count = 0;
+ goto set;
+ } else if (buf_in_between(q->last_move_ftc, bufnr, count)) {
+ if (is_qebsm(q)) {
+ /* partial overwrite, just update last_move_ftc */
+ diff = add_buf(bufnr, count);
+ diff = sub_buf(diff, q->last_move_ftc);
+ q->u.in.ack_count -= diff;
+ if (q->u.in.ack_count <= 0) {
+ q->u.in.polling = 0;
+ q->u.in.ack_count = 0;
+ /* TODO: must we set last_move_ftc to something meaningful? */
+ goto set;
+ }
+ q->last_move_ftc = add_buf(q->last_move_ftc, diff);
+ }
+ else
+ /* the only ACK will be deleted, so stop polling */
q->u.in.polling = 0;
+ }
+set:
count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
- spin_unlock_irqrestore(&q->u.in.lock, flags);
used = atomic_add_return(count, &q->nr_buf_used) - count;
BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
@@ -1591,9 +1486,9 @@ static void handle_inbound(struct qdio_q *q, unsigned int callflags,
return;
if (need_siga_in(q)) {
- rc = qdio_siga_input(q);
- if (rc)
- q->qdio_error = rc;
+ cc = qdio_siga_input(q);
+ if (cc)
+ q->qdio_error = cc;
}
}
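The partial-overwrite branch in the handle_inbound() hunk above relies on the ring arithmetic helpers add_buf()/sub_buf()/buf_in_between(): buffer numbers wrap around the queue. A small stand-alone sketch of that arithmetic, assuming a ring size of QDIO_MAX_BUFFERS_PER_Q = 128 (the helper bodies here are illustrative, not copied from the driver):

	#include <stdio.h>

	#define QDIO_MAX_BUFFERS_PER_Q	128	/* assumed ring size */

	static int add_buf(int bufnr, int inc)
	{
		return (bufnr + inc) & (QDIO_MAX_BUFFERS_PER_Q - 1);
	}

	static int sub_buf(int bufnr, int dec)
	{
		return (bufnr - dec) & (QDIO_MAX_BUFFERS_PER_Q - 1);
	}

	int main(void)
	{
		int bufnr = 120, count = 16, last_move_ftc = 124;
		/* end of the range being returned by do_QDIO, wrapped */
		int end = add_buf(bufnr, count);	/* (120 + 16) mod 128 = 8  */
		/* number of ACKed buffers the caller just overwrote */
		int diff = sub_buf(end, last_move_ftc);	/* (8 - 124) mod 128 = 12  */

		printf("end=%d overwritten ACKs=%d\n", end, diff);
		return 0;
	}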
@@ -1640,6 +1535,10 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags,
while (count--)
qdio_kick_outbound_q(q);
}
+
+ /* report CC=2 conditions synchronously */
+ if (q->qdio_error)
+ __qdio_outbound_processing(q);
goto out;
}
@@ -1649,11 +1548,11 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags,
}
/* try to fast requeue buffers */
- get_buf_state(q, prev_buf(bufnr), &state);
+ get_buf_state(q, prev_buf(bufnr), &state, 0);
if (state != SLSB_CU_OUTPUT_PRIMED)
qdio_kick_outbound_q(q);
else {
- QDIO_DBF_TEXT5(0, trace, "fast-req");
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req");
qdio_perf_stat_inc(&perf_stats.fast_requeue);
}
out:
@@ -1673,12 +1572,6 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
int q_nr, int bufnr, int count)
{
struct qdio_irq *irq_ptr;
-#ifdef CONFIG_QDIO_DEBUG
- char dbf_text[20];
-
- sprintf(dbf_text, "doQD%4x", cdev->private->schid.sch_no);
- QDIO_DBF_TEXT3(0, trace, dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
(count > QDIO_MAX_BUFFERS_PER_Q) ||
@@ -1692,33 +1585,24 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
if (!irq_ptr)
return -ENODEV;
-#ifdef CONFIG_QDIO_DEBUG
if (callflags & QDIO_FLAG_SYNC_INPUT)
- QDIO_DBF_HEX3(0, trace, &irq_ptr->input_qs[q_nr],
- sizeof(void *));
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO input");
else
- QDIO_DBF_HEX3(0, trace, &irq_ptr->output_qs[q_nr],
- sizeof(void *));
-
- sprintf(dbf_text, "flag%04x", callflags);
- QDIO_DBF_TEXT3(0, trace, dbf_text);
- sprintf(dbf_text, "qi%02xct%02x", bufnr, count);
- QDIO_DBF_TEXT3(0, trace, dbf_text);
-#endif /* CONFIG_QDIO_DEBUG */
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO output");
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "q:%1d flag:%4x", q_nr, callflags);
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "buf:%2d cnt:%3d", bufnr, count);
if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
return -EBUSY;
if (callflags & QDIO_FLAG_SYNC_INPUT)
- handle_inbound(irq_ptr->input_qs[q_nr],
- callflags, bufnr, count);
+ handle_inbound(irq_ptr->input_qs[q_nr], callflags, bufnr,
+ count);
else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
- handle_outbound(irq_ptr->output_qs[q_nr],
- callflags, bufnr, count);
- else {
- QDIO_DBF_TEXT3(1, trace, "doQD:inv");
+ handle_outbound(irq_ptr->output_qs[q_nr], callflags, bufnr,
+ count);
+ else
return -EINVAL;
- }
return 0;
}
EXPORT_SYMBOL_GPL(do_QDIO);
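The do_QDIO() hunk above keeps the flag-based dispatch: QDIO_FLAG_SYNC_INPUT hands the buffers to handle_inbound(), QDIO_FLAG_SYNC_OUTPUT to handle_outbound(), and anything else is rejected with -EINVAL. A minimal caller sketch (the wrapper name is hypothetical):

	/* Return 'count' processed inbound buffers, starting at 'bufnr',
	 * on input queue 0 of the given qdio device. */
	static int example_return_inbound(struct ccw_device *cdev, int bufnr, int count)
	{
		return do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, count);
	}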
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
index ec5c4a414235..136d0f0b1e93 100644
--- a/drivers/s390/cio/qdio_perf.c
+++ b/drivers/s390/cio/qdio_perf.c
@@ -74,12 +74,20 @@ static int qdio_perf_proc_show(struct seq_file *m, void *v)
seq_printf(m, "\n");
seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
(long)atomic_long_read(&perf_stats.fast_requeue));
+ seq_printf(m, "Number of outbound target full condition\t: %li\n",
+ (long)atomic_long_read(&perf_stats.outbound_target_full));
seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
(long)atomic_long_read(&perf_stats.debug_tl_out_timer));
seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.debug_stop_polling));
seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
(long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
+ seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
+ (long)atomic_long_read(&perf_stats.debug_eqbs_all),
+ (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
+ seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
+ (long)atomic_long_read(&perf_stats.debug_sqbs_all),
+ (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
seq_printf(m, "\n");
return 0;
}
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
index 5c406a8b7387..7821ac4fa517 100644
--- a/drivers/s390/cio/qdio_perf.h
+++ b/drivers/s390/cio/qdio_perf.h
@@ -36,10 +36,15 @@ struct qdio_perf_stats {
atomic_long_t inbound_handler;
atomic_long_t outbound_handler;
atomic_long_t fast_requeue;
+ atomic_long_t outbound_target_full;
/* for debugging */
atomic_long_t debug_tl_out_timer;
atomic_long_t debug_stop_polling;
+ atomic_long_t debug_eqbs_all;
+ atomic_long_t debug_eqbs_incomplete;
+ atomic_long_t debug_sqbs_all;
+ atomic_long_t debug_sqbs_incomplete;
};
extern struct qdio_perf_stats perf_stats;
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index a0b6b46e7466..c08356b95bf5 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -117,17 +117,16 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
q->mask = 1 << (31 - i);
q->nr = i;
q->handler = handler;
+ spin_lock_init(&q->lock);
}
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
- void **sbals_array, char *dbf_text, int i)
+ void **sbals_array, int i)
{
struct qdio_q *prev;
int j;
- QDIO_DBF_TEXT0(0, setup, dbf_text);
- QDIO_DBF_HEX0(0, setup, &q, sizeof(void *));
-
+ DBF_HEX(&q, sizeof(void *));
q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
/* fill in sbal */
@@ -150,31 +149,26 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
q->sl->element[j].sbal = (unsigned long)q->sbal[j];
- QDIO_DBF_TEXT2(0, setup, "sl-sb-b0");
- QDIO_DBF_HEX2(0, setup, q->sl, sizeof(void *));
- QDIO_DBF_HEX2(0, setup, &q->slsb, sizeof(void *));
- QDIO_DBF_HEX2(0, setup, q->sbal, sizeof(void *));
+ DBF_EVENT("sl-slsb-sbal");
+ DBF_HEX(q->sl, sizeof(void *));
+ DBF_HEX(&q->slsb, sizeof(void *));
+ DBF_HEX(q->sbal, sizeof(void *));
}
static void setup_queues(struct qdio_irq *irq_ptr,
struct qdio_initialize *qdio_init)
{
- char dbf_text[20];
struct qdio_q *q;
void **input_sbal_array = qdio_init->input_sbal_addr_array;
void **output_sbal_array = qdio_init->output_sbal_addr_array;
int i;
- sprintf(dbf_text, "qset%4x", qdio_init->cdev->private->schid.sch_no);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
-
for_each_input_queue(irq_ptr, q, i) {
- sprintf(dbf_text, "in-q%4x", i);
+ DBF_EVENT("in-q:%1d", i);
setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
q->is_input_q = 1;
- spin_lock_init(&q->u.in.lock);
- setup_storage_lists(q, irq_ptr, input_sbal_array, dbf_text, i);
+ setup_storage_lists(q, irq_ptr, input_sbal_array, i);
input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
if (is_thinint_irq(irq_ptr))
@@ -186,12 +180,11 @@ static void setup_queues(struct qdio_irq *irq_ptr,
}
for_each_output_queue(irq_ptr, q, i) {
- sprintf(dbf_text, "outq%4x", i);
+ DBF_EVENT("outq:%1d", i);
setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
q->is_input_q = 0;
- setup_storage_lists(q, irq_ptr, output_sbal_array,
- dbf_text, i);
+ setup_storage_lists(q, irq_ptr, output_sbal_array, i);
output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
tasklet_init(&q->tasklet, qdio_outbound_processing,
@@ -222,8 +215,6 @@ static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
unsigned char qdioac, unsigned long token)
{
- char dbf_text[15];
-
if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
goto no_qebsm;
if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
@@ -232,33 +223,41 @@ static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
irq_ptr->sch_token = token;
- QDIO_DBF_TEXT0(0, setup, "V=V:1");
- sprintf(dbf_text, "%8lx", irq_ptr->sch_token);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
+ DBF_EVENT("V=V:1");
+ DBF_EVENT("%8lx", irq_ptr->sch_token);
return;
no_qebsm:
irq_ptr->sch_token = 0;
irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
- QDIO_DBF_TEXT0(0, setup, "noV=V");
+ DBF_EVENT("noV=V");
}
-static int __get_ssqd_info(struct qdio_irq *irq_ptr)
+/*
+ * If there is a qdio_irq, we use the chsc_page and store the information
+ * in the qdio_irq; otherwise we copy it to the specified structure.
+ */
+int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
+ struct subchannel_id *schid,
+ struct qdio_ssqd_desc *data)
{
struct chsc_ssqd_area *ssqd;
int rc;
- QDIO_DBF_TEXT0(0, setup, "getssqd");
- ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
+ DBF_EVENT("getssqd:%4x", schid->sch_no);
+ if (irq_ptr != NULL)
+ ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
+ else
+ ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL);
memset(ssqd, 0, PAGE_SIZE);
ssqd->request = (struct chsc_header) {
.length = 0x0010,
.code = 0x0024,
};
- ssqd->first_sch = irq_ptr->schid.sch_no;
- ssqd->last_sch = irq_ptr->schid.sch_no;
- ssqd->ssid = irq_ptr->schid.ssid;
+ ssqd->first_sch = schid->sch_no;
+ ssqd->last_sch = schid->sch_no;
+ ssqd->ssid = schid->ssid;
if (chsc(ssqd))
return -EIO;
@@ -268,27 +267,29 @@ static int __get_ssqd_info(struct qdio_irq *irq_ptr)
if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
!(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
- (ssqd->qdio_ssqd.sch != irq_ptr->schid.sch_no))
+ (ssqd->qdio_ssqd.sch != schid->sch_no))
return -EINVAL;
- memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd,
- sizeof(struct qdio_ssqd_desc));
+ if (irq_ptr != NULL)
+ memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd,
+ sizeof(struct qdio_ssqd_desc));
+ else {
+ memcpy(data, &ssqd->qdio_ssqd,
+ sizeof(struct qdio_ssqd_desc));
+ free_page((unsigned long)ssqd);
+ }
return 0;
}
void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
{
unsigned char qdioac;
- char dbf_text[15];
int rc;
- rc = __get_ssqd_info(irq_ptr);
+ rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, NULL);
if (rc) {
- QDIO_DBF_TEXT2(0, setup, "ssqdasig");
- sprintf(dbf_text, "schn%4x", irq_ptr->schid.sch_no);
- QDIO_DBF_TEXT2(0, setup, dbf_text);
- sprintf(dbf_text, "rc:%d", rc);
- QDIO_DBF_TEXT2(0, setup, dbf_text);
+ DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no);
+ DBF_ERROR("rc:%x", rc);
/* all flags set, worst case */
qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
AC1_SIGA_SYNC_NEEDED;
@@ -297,9 +298,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
process_ac_flags(irq_ptr, qdioac);
-
- sprintf(dbf_text, "qdioac%2x", qdioac);
- QDIO_DBF_TEXT2(0, setup, dbf_text);
+ DBF_EVENT("qdioac:%4x", qdioac);
}
void qdio_release_memory(struct qdio_irq *irq_ptr)
@@ -419,7 +418,7 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
/* get qdio commands */
ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
if (!ciw) {
- QDIO_DBF_TEXT2(1, setup, "no eq");
+ DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
rc = -EINVAL;
goto out_err;
}
@@ -427,7 +426,7 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
if (!ciw) {
- QDIO_DBF_TEXT2(1, setup, "no aq");
+ DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
rc = -EINVAL;
goto out_err;
}
@@ -447,56 +446,38 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
{
char s[80];
- sprintf(s, "qdio: %s ", dev_name(&cdev->dev));
- switch (irq_ptr->qib.qfmt) {
- case QDIO_QETH_QFMT:
- sprintf(s + strlen(s), "OSA ");
- break;
- case QDIO_ZFCP_QFMT:
- sprintf(s + strlen(s), "ZFCP ");
- break;
- case QDIO_IQDIO_QFMT:
- sprintf(s + strlen(s), "HS ");
- break;
- }
- sprintf(s + strlen(s), "on SC %x using ", irq_ptr->schid.sch_no);
- sprintf(s + strlen(s), "AI:%d ", is_thinint_irq(irq_ptr));
- sprintf(s + strlen(s), "QEBSM:%d ", (irq_ptr->sch_token) ? 1 : 0);
- sprintf(s + strlen(s), "PCI:%d ",
- (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0);
- sprintf(s + strlen(s), "TDD:%d ", css_general_characteristics.aif_tdd);
- sprintf(s + strlen(s), "SIGA:");
- sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.input) ? "R" : " ");
- sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.output) ? "W" : " ");
- sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.sync) ? "S" : " ");
- sprintf(s + strlen(s), "%s",
- (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " ");
- sprintf(s + strlen(s), "%s",
- (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " ");
- sprintf(s + strlen(s), "%s",
- (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " ");
- sprintf(s + strlen(s), "\n");
+ snprintf(s, 80, "qdio: %s %s on SC %x using "
+ "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s%s\n",
+ dev_name(&cdev->dev),
+ (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
+ ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
+ irq_ptr->schid.sch_no,
+ is_thinint_irq(irq_ptr),
+ (irq_ptr->sch_token) ? 1 : 0,
+ (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0,
+ css_general_characteristics.aif_tdd,
+ (irq_ptr->siga_flag.input) ? "R" : " ",
+ (irq_ptr->siga_flag.output) ? "W" : " ",
+ (irq_ptr->siga_flag.sync) ? "S" : " ",
+ (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " ",
+ (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " ",
+ (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " ");
printk(KERN_INFO "%s", s);
}
int __init qdio_setup_init(void)
{
- char dbf_text[15];
-
qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
256, 0, NULL);
if (!qdio_q_cache)
return -ENOMEM;
/* Check for OSA/FCP thin interrupts (bit 67). */
- sprintf(dbf_text, "thini%1x",
- (css_general_characteristics.aif_osa) ? 1 : 0);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
+ DBF_EVENT("thinint:%1d",
+ (css_general_characteristics.aif_osa) ? 1 : 0);
/* Check for QEBSM support in general (bit 58). */
- sprintf(dbf_text, "cssQBS:%1x",
- (qebsm_possible()) ? 1 : 0);
- QDIO_DBF_TEXT0(0, setup, dbf_text);
+ DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
return 0;
}
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index ea7f61400267..8e90e147b746 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -125,13 +125,13 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
static inline int tiqdio_inbound_q_done(struct qdio_q *q)
{
- unsigned char state;
+ unsigned char state = 0;
if (!atomic_read(&q->nr_buf_used))
return 1;
qdio_siga_sync_q(q);
- get_buf_state(q, q->first_to_check, &state);
+ get_buf_state(q, q->first_to_check, &state, 0);
if (state == SLSB_P_INPUT_PRIMED)
/* more work coming */
@@ -258,8 +258,6 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data)
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
struct scssc_area *scssc_area;
- char dbf_text[15];
- void *ptr;
int rc;
scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
@@ -294,19 +292,15 @@ static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
rc = chsc_error_from_response(scssc_area->response.code);
if (rc) {
- sprintf(dbf_text, "sidR%4x", scssc_area->response.code);
- QDIO_DBF_TEXT1(0, trace, dbf_text);
- QDIO_DBF_TEXT1(0, setup, dbf_text);
- ptr = &scssc_area->response;
- QDIO_DBF_HEX2(1, setup, &ptr, QDIO_DBF_SETUP_LEN);
+ DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
+ scssc_area->response.code);
+ DBF_ERROR_HEX(&scssc_area->response, sizeof(void *));
return rc;
}
- QDIO_DBF_TEXT2(0, setup, "setscind");
- QDIO_DBF_HEX2(0, setup, &scssc_area->summary_indicator_addr,
- sizeof(unsigned long));
- QDIO_DBF_HEX2(0, setup, &scssc_area->subchannel_indicator_addr,
- sizeof(unsigned long));
+ DBF_EVENT("setscind");
+ DBF_HEX(&scssc_area->summary_indicator_addr, sizeof(unsigned long));
+ DBF_HEX(&scssc_area->subchannel_indicator_addr, sizeof(unsigned long));
return 0;
}
@@ -327,14 +321,11 @@ void tiqdio_free_memory(void)
int __init tiqdio_register_thinints(void)
{
- char dbf_text[20];
-
isc_register(QDIO_AIRQ_ISC);
tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
NULL, QDIO_AIRQ_ISC);
if (IS_ERR(tiqdio_alsi)) {
- sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_alsi));
- QDIO_DBF_TEXT0(0, setup, dbf_text);
+ DBF_EVENT("RTI:%lx", PTR_ERR(tiqdio_alsi));
tiqdio_alsi = NULL;
isc_unregister(QDIO_AIRQ_ISC);
return -ENOMEM;
@@ -360,7 +351,7 @@ void qdio_setup_thinint(struct qdio_irq *irq_ptr)
if (!is_thinint_irq(irq_ptr))
return;
irq_ptr->dsci = get_indicator();
- QDIO_DBF_HEX1(0, setup, &irq_ptr->dsci, sizeof(void *));
+ DBF_HEX(&irq_ptr->dsci, sizeof(void *));
}
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index e3fe6838293a..1f5f5d2d87d9 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -5,6 +5,7 @@
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * Felix Beck <felix.beck@de.ibm.com>
*
* Adjunct processor bus.
*
@@ -23,6 +24,9 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define KMSG_COMPONENT "ap"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -34,6 +38,10 @@
#include <linux/mutex.h>
#include <asm/s390_rdev.h>
#include <asm/reset.h>
+#include <asm/airq.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/isc.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
@@ -46,6 +54,7 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
static int ap_poll_thread_start(void);
static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long);
+static inline void ap_schedule_poll_timer(void);
/*
* Module description.
@@ -68,7 +77,7 @@ module_param_named(poll_thread, ap_thread_flag, int, 0000);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
static struct device *ap_root_device = NULL;
-static DEFINE_SPINLOCK(ap_device_lock);
+static DEFINE_SPINLOCK(ap_device_list_lock);
static LIST_HEAD(ap_device_list);
/*
@@ -80,19 +89,29 @@ static int ap_config_time = AP_CONFIG_TIME;
static DECLARE_WORK(ap_config_work, ap_scan_bus);
/*
- * Tasklet & timer for AP request polling.
+ * Tasklet & timer for AP request polling and interrupts
*/
static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
static atomic_t ap_poll_requests = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);
+static void *ap_interrupt_indicator;
static struct hrtimer ap_poll_timer;
/* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
* If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/
static unsigned long long poll_timeout = 250000;
/**
+ * ap_using_interrupts() - Returns non-zero if interrupt support is
+ * available.
+ */
+static inline int ap_using_interrupts(void)
+{
+ return ap_interrupt_indicator != NULL;
+}
+
+/**
* ap_intructions_available() - Test if AP instructions are available.
*
* Returns 0 if the AP instructions are installed.
@@ -113,6 +132,23 @@ static inline int ap_instructions_available(void)
}
/**
+ * ap_interrupts_available(): Test if AP interrupts are available.
+ *
+ * Returns 1 if AP interrupts are available, otherwise 0.
+ */
+static int ap_interrupts_available(void)
+{
+ unsigned long long facility_bits[2];
+
+ if (stfle(facility_bits, 2) <= 1)
+ return 0;
+ if (!(facility_bits[0] & (1ULL << 61)) ||
+ !(facility_bits[1] & (1ULL << 62)))
+ return 0;
+ return 1;
+}
+
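The stfle()-based test above checks two facility bits: (1ULL << 61) in doubleword 0 and (1ULL << 62) in doubleword 1, which under the usual facility-list numbering (facility 0 is the leftmost bit of the first doubleword) correspond to facility numbers 2 and 65. A small helper sketch of that mapping (illustrative only, not part of the patch):

	/* Facility number -> (doubleword index, bit counted from the MSB). */
	static inline int facility_bit_set(unsigned long long *facility_bits, int nr)
	{
		return (facility_bits[nr / 64] >> (63 - (nr % 64))) & 1;
	}

	/* facility_bit_set(bits, 2)  == !!(bits[0] & (1ULL << 61))
	 * facility_bit_set(bits, 65) == !!(bits[1] & (1ULL << 62)) */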
+/**
* ap_test_queue(): Test adjunct processor queue.
* @qid: The AP queue number
* @queue_depth: Pointer to queue depth value
@@ -152,6 +188,80 @@ static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
return reg1;
}
+#ifdef CONFIG_64BIT
+/**
+ * ap_queue_interruption_control(): Enable interruption for a specific AP.
+ * @qid: The AP queue number
+ * @ind: The notification indicator byte
+ *
+ * Returns AP queue status.
+ */
+static inline struct ap_queue_status
+ap_queue_interruption_control(ap_qid_t qid, void *ind)
+{
+ register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
+ register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
+ register struct ap_queue_status reg1_out asm ("1");
+ register void *reg2 asm ("2") = ind;
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(RAPQ) */
+ : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
+ :
+ : "cc" );
+ return reg1_out;
+}
+#endif
+
+/**
+ * ap_queue_enable_interruption(): Enable interruption on an AP.
+ * @qid: The AP queue number
+ * @ind: the notification indicator byte
+ *
+ * Enables interruption on an AP queue via ap_queue_interruption_control().
+ * Based on the return value it waits a while and then uses ap_test_queue()
+ * to check whether interrupts have been switched on for the AP queue.
+ */
+static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
+{
+#ifdef CONFIG_64BIT
+ struct ap_queue_status status;
+ int t_depth, t_device_type, rc, i;
+
+ rc = -EBUSY;
+ status = ap_queue_interruption_control(qid, ind);
+
+ for (i = 0; i < AP_MAX_RESET; i++) {
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ if (status.int_enabled)
+ return 0;
+ break;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_BUSY:
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ case AP_RESPONSE_INVALID_ADDRESS:
+ return -ENODEV;
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+ if (status.int_enabled)
+ return 0;
+ break;
+ default:
+ break;
+ }
+ if (i < AP_MAX_RESET - 1) {
+ udelay(5);
+ status = ap_test_queue(qid, &t_depth, &t_device_type);
+ }
+ }
+ return rc;
+#else
+ return -EINVAL;
+#endif
+}
+
/**
* __ap_send(): Send message to adjunct processor queue.
* @qid: The AP queue number
@@ -295,6 +405,11 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
case AP_RESPONSE_CHECKSTOPPED:
rc = -ENODEV;
break;
+ case AP_RESPONSE_INVALID_ADDRESS:
+ rc = -ENODEV;
+ break;
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+ break;
case AP_RESPONSE_BUSY:
break;
default:
@@ -345,6 +460,15 @@ static int ap_init_queue(ap_qid_t qid)
status = ap_test_queue(qid, &dummy, &dummy);
}
}
+ if (rc == 0 && ap_using_interrupts()) {
+ rc = ap_queue_enable_interruption(qid, ap_interrupt_indicator);
+ /* If interruption mode is supported by the machine
+ * but an AP cannot be enabled for interruption,
+ * the AP will be discarded. */
+ if (rc)
+ pr_err("Registering adapter interrupts for "
+ "AP %d failed\n", AP_QID_DEVICE(qid));
+ }
return rc;
}
@@ -397,16 +521,16 @@ static ssize_t ap_hwtype_show(struct device *dev,
struct ap_device *ap_dev = to_ap_dev(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
}
-static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
+static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ap_device *ap_dev = to_ap_dev(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
}
-static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
+static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
static ssize_t ap_request_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -509,9 +633,9 @@ static int ap_device_probe(struct device *dev)
ap_dev->drv = ap_drv;
rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
if (!rc) {
- spin_lock_bh(&ap_device_lock);
+ spin_lock_bh(&ap_device_list_lock);
list_add(&ap_dev->list, &ap_device_list);
- spin_unlock_bh(&ap_device_lock);
+ spin_unlock_bh(&ap_device_list_lock);
}
return rc;
}
@@ -553,9 +677,9 @@ static int ap_device_remove(struct device *dev)
ap_flush_queue(ap_dev);
del_timer_sync(&ap_dev->timeout);
- spin_lock_bh(&ap_device_lock);
+ spin_lock_bh(&ap_device_list_lock);
list_del_init(&ap_dev->list);
- spin_unlock_bh(&ap_device_lock);
+ spin_unlock_bh(&ap_device_list_lock);
if (ap_drv->remove)
ap_drv->remove(ap_dev);
spin_lock_bh(&ap_dev->lock);
@@ -599,6 +723,14 @@ static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
}
+static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ ap_using_interrupts() ? 1 : 0);
+}
+
+static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL);
+
static ssize_t ap_config_time_store(struct bus_type *bus,
const char *buf, size_t count)
{
@@ -653,7 +785,8 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
ktime_t hr_time;
/* 120 seconds = maximum poll interval */
- if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 || time > 120000000000)
+ if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
+ time > 120000000000ULL)
return -EINVAL;
poll_timeout = time;
hr_time = ktime_set(0, poll_timeout);
@@ -672,6 +805,7 @@ static struct bus_attribute *const ap_bus_attrs[] = {
&bus_attr_ap_domain,
&bus_attr_config_time,
&bus_attr_poll_thread,
+ &bus_attr_ap_interrupts,
&bus_attr_poll_timeout,
NULL,
};
@@ -814,6 +948,11 @@ out:
return rc;
}
+static void ap_interrupt_handler(void *unused1, void *unused2)
+{
+ tasklet_schedule(&ap_tasklet);
+}
+
/**
* __ap_scan_bus(): Scan the AP bus.
* @dev: Pointer to device
@@ -928,6 +1067,8 @@ ap_config_timeout(unsigned long ptr)
*/
static inline void ap_schedule_poll_timer(void)
{
+ if (ap_using_interrupts())
+ return;
if (hrtimer_is_queued(&ap_poll_timer))
return;
hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout),
@@ -1181,7 +1322,7 @@ static void ap_reset(struct ap_device *ap_dev)
ap_dev->unregistered = 1;
}
-static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags)
+static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
{
spin_lock(&ap_dev->lock);
if (!ap_dev->unregistered) {
@@ -1207,13 +1348,19 @@ static void ap_poll_all(unsigned long dummy)
unsigned long flags;
struct ap_device *ap_dev;
+ /* Reset the indicator if interrupts are used, so that new interrupts
+ * can be received. Doing this at the beginning of the tasklet is
+ * important to ensure that no requests on any AP get lost.
+ */
+ if (ap_using_interrupts())
+ xchg((u8 *)ap_interrupt_indicator, 0);
do {
flags = 0;
- spin_lock(&ap_device_lock);
+ spin_lock(&ap_device_list_lock);
list_for_each_entry(ap_dev, &ap_device_list, list) {
- __ap_poll_all(ap_dev, &flags);
+ __ap_poll_device(ap_dev, &flags);
}
- spin_unlock(&ap_device_lock);
+ spin_unlock(&ap_device_list_lock);
} while (flags & 1);
if (flags & 2)
ap_schedule_poll_timer();
@@ -1253,11 +1400,11 @@ static int ap_poll_thread(void *data)
remove_wait_queue(&ap_poll_wait, &wait);
flags = 0;
- spin_lock_bh(&ap_device_lock);
+ spin_lock_bh(&ap_device_list_lock);
list_for_each_entry(ap_dev, &ap_device_list, list) {
- __ap_poll_all(ap_dev, &flags);
+ __ap_poll_device(ap_dev, &flags);
}
- spin_unlock_bh(&ap_device_lock);
+ spin_unlock_bh(&ap_device_list_lock);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&ap_poll_wait, &wait);
@@ -1268,6 +1415,8 @@ static int ap_poll_thread_start(void)
{
int rc;
+ if (ap_using_interrupts())
+ return 0;
mutex_lock(&ap_poll_thread_mutex);
if (!ap_poll_kthread) {
ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
@@ -1301,8 +1450,12 @@ static void ap_request_timeout(unsigned long data)
{
struct ap_device *ap_dev = (struct ap_device *) data;
- if (ap_dev->reset == AP_RESET_ARMED)
+ if (ap_dev->reset == AP_RESET_ARMED) {
ap_dev->reset = AP_RESET_DO;
+
+ if (ap_using_interrupts())
+ tasklet_schedule(&ap_tasklet);
+ }
}
static void ap_reset_domain(void)
@@ -1337,14 +1490,25 @@ int __init ap_module_init(void)
int rc, i;
if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
- printk(KERN_WARNING "Invalid param: domain = %d. "
- " Not loading.\n", ap_domain_index);
+ pr_warning("%d is not a valid cryptographic domain\n",
+ ap_domain_index);
return -EINVAL;
}
if (ap_instructions_available() != 0) {
- printk(KERN_WARNING "AP instructions not installed.\n");
+ pr_warning("The hardware system does not support "
+ "AP instructions\n");
return -ENODEV;
}
+ if (ap_interrupts_available()) {
+ isc_register(AP_ISC);
+ ap_interrupt_indicator = s390_register_adapter_interrupt(
+ &ap_interrupt_handler, NULL, AP_ISC);
+ if (IS_ERR(ap_interrupt_indicator)) {
+ ap_interrupt_indicator = NULL;
+ isc_unregister(AP_ISC);
+ }
+ }
+
register_reset_call(&ap_reset_call);
/* Create /sys/bus/ap. */
@@ -1408,6 +1572,10 @@ out_bus:
bus_unregister(&ap_bus_type);
out:
unregister_reset_call(&ap_reset_call);
+ if (ap_using_interrupts()) {
+ s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
+ isc_unregister(AP_ISC);
+ }
return rc;
}
@@ -1443,6 +1611,10 @@ void ap_module_exit(void)
bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
bus_unregister(&ap_bus_type);
unregister_reset_call(&ap_reset_call);
+ if (ap_using_interrupts()) {
+ s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
+ isc_unregister(AP_ISC);
+ }
}
#ifndef CONFIG_ZCRYPT_MONOLITHIC
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 446378b308fc..a35362241805 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -5,6 +5,7 @@
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * Felix Beck <felix.beck@de.ibm.com>
*
* Adjunct processor bus header file.
*
@@ -67,7 +68,8 @@ struct ap_queue_status {
unsigned int queue_empty : 1;
unsigned int replies_waiting : 1;
unsigned int queue_full : 1;
- unsigned int pad1 : 5;
+ unsigned int pad1 : 4;
+ unsigned int int_enabled : 1;
unsigned int response_code : 8;
unsigned int pad2 : 16;
};
@@ -78,6 +80,8 @@ struct ap_queue_status {
#define AP_RESPONSE_DECONFIGURED 0x03
#define AP_RESPONSE_CHECKSTOPPED 0x04
#define AP_RESPONSE_BUSY 0x05
+#define AP_RESPONSE_INVALID_ADDRESS 0x06
+#define AP_RESPONSE_OTHERWISE_CHANGED 0x07
#define AP_RESPONSE_Q_FULL 0x10
#define AP_RESPONSE_NO_PENDING_REPLY 0x10
#define AP_RESPONSE_INDEX_TOO_BIG 0x11
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 54f4cbc3be9e..326ea08f67c9 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -264,17 +264,21 @@ static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
.type = TYPE82_RSP_CODE,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
- struct type80_hdr *t80h = reply->message;
+ struct type80_hdr *t80h;
int length;
/* Copy the reply message to the request message buffer. */
- if (IS_ERR(reply))
+ if (IS_ERR(reply)) {
memcpy(msg->message, &error_reply, sizeof(error_reply));
- else if (t80h->type == TYPE80_RSP_CODE) {
+ goto out;
+ }
+ t80h = reply->message;
+ if (t80h->type == TYPE80_RSP_CODE) {
length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len);
memcpy(msg->message, reply->message, length);
} else
memcpy(msg->message, reply->message, sizeof error_reply);
+out:
complete((struct completion *) msg->private);
}
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 12da4815ba8e..17ba81b58c78 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -247,17 +247,21 @@ static void zcrypt_pcica_receive(struct ap_device *ap_dev,
.type = TYPE82_RSP_CODE,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
- struct type84_hdr *t84h = reply->message;
+ struct type84_hdr *t84h;
int length;
/* Copy the reply message to the request message buffer. */
- if (IS_ERR(reply))
+ if (IS_ERR(reply)) {
memcpy(msg->message, &error_reply, sizeof(error_reply));
- else if (t84h->code == TYPE84_RSP_CODE) {
+ goto out;
+ }
+ t84h = reply->message;
+ if (t84h->code == TYPE84_RSP_CODE) {
length = min(PCICA_MAX_RESPONSE_SIZE, (int) t84h->len);
memcpy(msg->message, reply->message, length);
} else
memcpy(msg->message, reply->message, sizeof error_reply);
+out:
complete((struct completion *) msg->private);
}
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index 779952cb19fc..f4b0c4795434 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -447,19 +447,23 @@ static void zcrypt_pcicc_receive(struct ap_device *ap_dev,
.type = TYPE82_RSP_CODE,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
- struct type86_reply *t86r = reply->message;
+ struct type86_reply *t86r;
int length;
/* Copy the reply message to the request message buffer. */
- if (IS_ERR(reply))
+ if (IS_ERR(reply)) {
memcpy(msg->message, &error_reply, sizeof(error_reply));
- else if (t86r->hdr.type == TYPE86_RSP_CODE &&
+ goto out;
+ }
+ t86r = reply->message;
+ if (t86r->hdr.type == TYPE86_RSP_CODE &&
t86r->cprb.cprb_ver_id == 0x01) {
length = sizeof(struct type86_reply) + t86r->length - 2;
length = min(PCICC_MAX_RESPONSE_SIZE, length);
memcpy(msg->message, reply->message, length);
} else
memcpy(msg->message, reply->message, sizeof error_reply);
+out:
complete((struct completion *) msg->private);
}
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index d8ad36f81540..e7a1e22e77ac 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -635,13 +635,16 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev,
};
struct response_type *resp_type =
(struct response_type *) msg->private;
- struct type86x_reply *t86r = reply->message;
+ struct type86x_reply *t86r;
int length;
/* Copy the reply message to the request message buffer. */
- if (IS_ERR(reply))
+ if (IS_ERR(reply)) {
memcpy(msg->message, &error_reply, sizeof(error_reply));
- else if (t86r->hdr.type == TYPE86_RSP_CODE &&
+ goto out;
+ }
+ t86r = reply->message;
+ if (t86r->hdr.type == TYPE86_RSP_CODE &&
t86r->cprbx.cprb_ver_id == 0x02) {
switch (resp_type->type) {
case PCIXCC_RESPONSE_TYPE_ICA:
@@ -660,6 +663,7 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev,
}
} else
memcpy(msg->message, reply->message, sizeof error_reply);
+out:
complete(&(resp_type->work));
}
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 42776550acfd..f29c7086fc19 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -13,6 +13,9 @@
#undef DEBUGDATA
#undef DEBUGCCW
+#define KMSG_COMPONENT "ctcm"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -190,21 +193,22 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
{
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
- "%s(%s): %s: %04x\n",
- CTCM_FUNTAIL, ch->id, msg, rc);
+ "%s(%s): %s: %04x\n",
+ CTCM_FUNTAIL, ch->id, msg, rc);
switch (rc) {
case -EBUSY:
- ctcm_pr_warn("%s (%s): Busy !\n", ch->id, msg);
+ pr_info("%s: The communication peer is busy\n",
+ ch->id);
fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
break;
case -ENODEV:
- ctcm_pr_emerg("%s (%s): Invalid device called for IO\n",
- ch->id, msg);
+ pr_err("%s: The specified target device is not valid\n",
+ ch->id);
fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
break;
default:
- ctcm_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
- ch->id, msg, rc);
+ pr_err("An I/O operation resulted in error %04x\n",
+ rc);
fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
}
}
@@ -886,8 +890,15 @@ static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
fsm_newstate(fi, CTC_STATE_RXERR);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
}
- } else
- ctcm_pr_warn("%s: Error during RX init handshake\n", dev->name);
+ } else {
+ CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+ "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
+ ctc_ch_event_names[event], fsm_getstate_str(fi));
+
+ dev_warn(&dev->dev,
+ "Initialization failed with RX/TX init handshake "
+ "error %s\n", ctc_ch_event_names[event]);
+ }
}
/**
@@ -969,7 +980,9 @@ static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
ctc_ch_event_names[event], fsm_getstate_str(fi));
- ctcm_pr_warn("%s: Error during TX init handshake\n", dev->name);
+ dev_warn(&dev->dev,
+ "Initialization failed with RX/TX init handshake "
+ "error %s\n", ctc_ch_event_names[event]);
}
}
@@ -2101,14 +2114,11 @@ static void dev_action_restart(fsm_instance *fi, int event, void *arg)
CTCMY_DBF_DEV_NAME(TRACE, dev, "");
if (IS_MPC(priv)) {
- ctcm_pr_info("ctcm: %s Restarting Device and "
- "MPC Group in 5 seconds\n",
- dev->name);
restart_timer = CTCM_TIME_1_SEC;
} else {
- ctcm_pr_info("%s: Restarting\n", dev->name);
restart_timer = CTCM_TIME_5_SEC;
}
+ dev_info(&dev->dev, "Restarting device\n");
dev_action_stop(fi, event, arg);
fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
@@ -2150,16 +2160,16 @@ static void dev_action_chup(fsm_instance *fi, int event, void *arg)
case DEV_STATE_STARTWAIT_RX:
if (event == DEV_EVENT_RXUP) {
fsm_newstate(fi, DEV_STATE_RUNNING);
- ctcm_pr_info("%s: connected with remote side\n",
- dev->name);
+ dev_info(&dev->dev,
+ "Connected with remote side\n");
ctcm_clear_busy(dev);
}
break;
case DEV_STATE_STARTWAIT_TX:
if (event == DEV_EVENT_TXUP) {
fsm_newstate(fi, DEV_STATE_RUNNING);
- ctcm_pr_info("%s: connected with remote side\n",
- dev->name);
+ dev_info(&dev->dev,
+ "Connected with remote side\n");
ctcm_clear_busy(dev);
}
break;
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index a4e29836a2aa..2678573becec 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -21,6 +21,9 @@
#undef DEBUGDATA
#undef DEBUGCCW
+#define KMSG_COMPONENT "ctcm"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -281,14 +284,16 @@ static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb)
switch (PTR_ERR(irb)) {
case -EIO:
- ctcm_pr_warn("i/o-error on device %s\n", dev_name(&cdev->dev));
+ dev_err(&cdev->dev,
+ "An I/O-error occurred on the CTCM device\n");
break;
case -ETIMEDOUT:
- ctcm_pr_warn("timeout on device %s\n", dev_name(&cdev->dev));
+ dev_err(&cdev->dev,
+ "An adapter hardware operation timed out\n");
break;
default:
- ctcm_pr_warn("unknown error %ld on device %s\n",
- PTR_ERR(irb), dev_name(&cdev->dev));
+ dev_err(&cdev->dev,
+ "An error occurred on the adapter hardware\n");
}
return PTR_ERR(irb);
}
@@ -309,15 +314,17 @@ static inline void ccw_unit_check(struct channel *ch, __u8 sense)
if (sense & SNS0_INTERVENTION_REQ) {
if (sense & 0x01) {
if (ch->sense_rc != 0x01) {
- ctcm_pr_debug("%s: Interface disc. or Sel. "
- "reset (remote)\n", ch->id);
+ pr_notice(
+ "%s: The communication peer has "
+ "disconnected\n", ch->id);
ch->sense_rc = 0x01;
}
fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch);
} else {
if (ch->sense_rc != SNS0_INTERVENTION_REQ) {
- ctcm_pr_debug("%s: System reset (remote)\n",
- ch->id);
+ pr_notice(
+ "%s: The remote operating system is "
+ "not available\n", ch->id);
ch->sense_rc = SNS0_INTERVENTION_REQ;
}
fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch);
@@ -1194,8 +1201,11 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
/* Check for unsolicited interrupts. */
if (cgdev == NULL) {
- ctcm_pr_warn("ctcm: Got unsolicited irq: c-%02x d-%02x\n",
- cstat, dstat);
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_ERROR,
+ "%s(%s) unsolicited irq: c-%02x d-%02x\n",
+ CTCM_FUNTAIL, dev_name(&cdev->dev), cstat, dstat);
+ dev_warn(&cdev->dev,
+ "The adapter received a non-specific IRQ\n");
return;
}
@@ -1207,31 +1217,34 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
else if (priv->channel[WRITE]->cdev == cdev)
ch = priv->channel[WRITE];
else {
- ctcm_pr_err("ctcm: Can't determine channel for interrupt, "
- "device %s\n", dev_name(&cdev->dev));
+ dev_err(&cdev->dev,
+ "%s: Internal error: Can't determine channel for "
+ "interrupt device %s\n",
+ __func__, dev_name(&cdev->dev));
+ /* Explain: inconsistent internal structures */
return;
}
dev = ch->netdev;
if (dev == NULL) {
- ctcm_pr_crit("ctcm: %s dev=NULL bus_id=%s, ch=0x%p\n",
- __func__, dev_name(&cdev->dev), ch);
+ dev_err(&cdev->dev,
+ "%s Internal error: net_device is NULL, ch = 0x%p\n",
+ __func__, ch);
+ /* Explain: inconsistent internal structures */
return;
}
- CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
- "%s(%s): int. for %s: cstat=%02x dstat=%02x",
- CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat);
-
/* Copy interruption response block. */
memcpy(ch->irb, irb, sizeof(struct irb));
+ /* Issue error message and return on subchannel error code */
if (irb->scsw.cmd.cstat) {
- /* Check for good subchannel return code, otherwise error message */
fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch);
- ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n",
- dev->name, ch->id, irb->scsw.cmd.cstat,
- irb->scsw.cmd.dstat);
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+ "%s(%s): sub-ch check %s: cs=%02x ds=%02x",
+ CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat);
+ dev_warn(&cdev->dev,
+ "A check occurred on the subchannel\n");
return;
}
@@ -1239,7 +1252,7 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
if ((irb->ecw[0] & ch->sense_rc) == 0)
/* print it only once */
- CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
"%s(%s): sense=%02x, ds=%02x",
CTCM_FUNTAIL, ch->id, irb->ecw[0], dstat);
ccw_unit_check(ch, irb->ecw[0]);
@@ -1574,6 +1587,11 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
+ dev_info(&dev->dev,
+ "setup OK : r/w = %s/%s, protocol : %d\n",
+ priv->channel[READ]->id,
+ priv->channel[WRITE]->id, priv->protocol);
+
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
"setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name,
priv->channel[READ]->id,
@@ -1687,7 +1705,7 @@ static void __exit ctcm_exit(void)
{
unregister_cu3088_discipline(&ctcm_group_driver);
ctcm_unregister_dbf_views();
- ctcm_pr_info("CTCM driver unloaded\n");
+ pr_info("CTCM driver unloaded\n");
}
/*
@@ -1695,7 +1713,7 @@ static void __exit ctcm_exit(void)
*/
static void print_banner(void)
{
- printk(KERN_INFO "CTCM driver initialized\n");
+ pr_info("CTCM driver initialized\n");
}
/**
@@ -1717,8 +1735,8 @@ static int __init ctcm_init(void)
ret = register_cu3088_discipline(&ctcm_group_driver);
if (ret) {
ctcm_unregister_dbf_views();
- ctcm_pr_crit("ctcm_init failed with register_cu3088_discipline "
- "(rc = %d)\n", ret);
+ pr_err("%s / register_cu3088_discipline failed, ret = %d\n",
+ __func__, ret);
return ret;
}
print_banner();
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index d77cce3fe4d4..d925e732b7d8 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -41,12 +41,6 @@
#define LOG_FLAG_NOMEM 8
#define ctcm_pr_debug(fmt, arg...) printk(KERN_DEBUG fmt, ##arg)
-#define ctcm_pr_info(fmt, arg...) printk(KERN_INFO fmt, ##arg)
-#define ctcm_pr_notice(fmt, arg...) printk(KERN_NOTICE fmt, ##arg)
-#define ctcm_pr_warn(fmt, arg...) printk(KERN_WARNING fmt, ##arg)
-#define ctcm_pr_emerg(fmt, arg...) printk(KERN_EMERG fmt, ##arg)
-#define ctcm_pr_err(fmt, arg...) printk(KERN_ERR fmt, ##arg)
-#define ctcm_pr_crit(fmt, arg...) printk(KERN_CRIT fmt, ##arg)
#define CTCM_PR_DEBUG(fmt, arg...) \
do { \
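For illustration only (not part of this patch): with the ctcm_pr_* wrappers removed, each source file defines KMSG_COMPONENT and pr_fmt() before its includes so that pr_xxx() calls are prefixed with the component name at compile time, while dev_xxx() calls take a struct device and are prefixed by the driver core with the driver and bus ID instead. A minimal out-of-tree sketch of that pattern, with all names hypothetical:

#define KMSG_COMPONENT "demo"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt	/* must precede the includes */

#include <linux/module.h>
#include <linux/init.h>

static int __init demo_init(void)
{
	pr_info("driver initialized\n");	/* logs "demo: driver initialized" */
	return 0;
}

static void __exit demo_exit(void)
{
	pr_info("driver unloaded\n");		/* logs "demo: driver unloaded" */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");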
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 19f5d5ed85e0..3db5f846bbf6 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -19,6 +19,9 @@
#undef DEBUGDATA
#undef DEBUGCCW
+#define KMSG_COMPONENT "ctcm"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -386,7 +389,7 @@ int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int))
if (grp->allocchan_callback_retries < 4) {
if (grp->allochanfunc)
grp->allochanfunc(grp->port_num,
- grp->group_max_buflen);
+ grp->group_max_buflen);
} else {
/* there are problems...bail out */
/* there may be a state mismatch so restart */
@@ -1232,8 +1235,9 @@ done:
dev_kfree_skb_any(pskb);
if (sendrc == NET_RX_DROP) {
- printk(KERN_WARNING "%s %s() NETWORK BACKLOG EXCEEDED"
- " - PACKET DROPPED\n", dev->name, __func__);
+ dev_warn(&dev->dev,
+ "The network backlog for %s is exceeded, "
+			"packet dropped\n", __func__);
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
}
@@ -1670,10 +1674,11 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo)
CTCM_FUNTAIL, ch->id);
}
}
-
done:
if (rc) {
- ctcm_pr_info("ctcmpc : %s() failed\n", __func__);
+ dev_warn(&dev->dev,
+ "The XID used in the MPC protocol is not valid, "
+ "rc = %d\n", rc);
priv->xid->xid2_flag2 = 0x40;
grp->saved_xid2->xid2_flag2 = 0x40;
}
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index bb2d13721d34..8452bb052d68 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -10,6 +10,9 @@
#undef DEBUGDATA
#undef DEBUGCCW
+#define KMSG_COMPONENT "ctcm"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/sysfs.h>
#include "ctcm_main.h"
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 0825be87e5a0..fb6c70cec253 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -26,6 +26,9 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define KMSG_COMPONENT "lcs"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/if.h>
#include <linux/netdevice.h>
@@ -54,8 +57,6 @@
#error Cannot compile lcs.c without some net devices switched on.
#endif
-#define PRINTK_HEADER " lcs: "
-
/**
* initialization string for output
*/
@@ -96,7 +97,7 @@ lcs_register_debug_facility(void)
lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8);
lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8);
if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) {
- PRINT_ERR("Not enough memory for debug facility.\n");
+ pr_err("Not enough memory for debug facility.\n");
lcs_unregister_debug_facility();
return -ENOMEM;
}
@@ -503,7 +504,9 @@ lcs_start_channel(struct lcs_channel *channel)
if (rc) {
LCS_DBF_TEXT_(4,trace,"essh%s",
dev_name(&channel->ccwdev->dev));
- PRINT_ERR("Error in starting channel, rc=%d!\n", rc);
+ dev_err(&channel->ccwdev->dev,
+ "Starting an LCS device resulted in an error,"
+ " rc=%d!\n", rc);
}
return rc;
}
@@ -640,7 +643,9 @@ __lcs_resume_channel(struct lcs_channel *channel)
if (rc) {
LCS_DBF_TEXT_(4, trace, "ersc%s",
dev_name(&channel->ccwdev->dev));
- PRINT_ERR("Error in lcs_resume_channel: rc=%d\n",rc);
+ dev_err(&channel->ccwdev->dev,
+ "Sending data from the LCS device to the LAN failed"
+ " with rc=%d\n",rc);
} else
channel->state = LCS_CH_STATE_RUNNING;
return rc;
@@ -1086,7 +1091,7 @@ lcs_check_multicast_support(struct lcs_card *card)
cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb);
if (rc != 0) {
- PRINT_ERR("Query IPAssist failed. Assuming unsupported!\n");
+ pr_err("Query IPAssist failed. Assuming unsupported!\n");
return -EOPNOTSUPP;
}
if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT)
@@ -1119,8 +1124,8 @@ list_modified:
rc = lcs_send_setipm(card, ipm);
spin_lock_irqsave(&card->ipm_lock, flags);
if (rc) {
- PRINT_INFO("Adding multicast address failed. "
- "Table possibly full!\n");
+ pr_info("Adding multicast address failed."
+ " Table possibly full!\n");
/* store ipm in failed list -> will be added
* to ipm_list again, so a retry will be done
* during the next call of this function */
@@ -1231,8 +1236,8 @@ lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
ipm = (struct lcs_ipm_list *)
kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
if (ipm == NULL) {
- PRINT_INFO("Not enough memory to add "
- "new multicast entry!\n");
+ pr_info("Not enough memory to add"
+ " new multicast entry!\n");
break;
}
memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH);
@@ -1306,18 +1311,21 @@ lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
switch (PTR_ERR(irb)) {
case -EIO:
- PRINT_WARN("i/o-error on device %s\n", dev_name(&cdev->dev));
+ dev_warn(&cdev->dev,
+ "An I/O-error occurred on the LCS device\n");
LCS_DBF_TEXT(2, trace, "ckirberr");
LCS_DBF_TEXT_(2, trace, " rc%d", -EIO);
break;
case -ETIMEDOUT:
- PRINT_WARN("timeout on device %s\n", dev_name(&cdev->dev));
+ dev_warn(&cdev->dev,
+ "A command timed out on the LCS device\n");
LCS_DBF_TEXT(2, trace, "ckirberr");
LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT);
break;
default:
- PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
- dev_name(&cdev->dev));
+ dev_warn(&cdev->dev,
+ "An error occurred on the LCS device, rc=%ld\n",
+ PTR_ERR(irb));
LCS_DBF_TEXT(2, trace, "ckirberr");
LCS_DBF_TEXT(2, trace, " rc???");
}
@@ -1403,8 +1411,10 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
/* Check for channel and device errors presented */
rc = lcs_get_problem(cdev, irb);
if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) {
- PRINT_WARN("check on device %s, dstat=0x%X, cstat=0x%X \n",
- dev_name(&cdev->dev), dstat, cstat);
+ dev_warn(&cdev->dev,
+ "The LCS device stopped because of an error,"
+ " dstat=0x%X, cstat=0x%X \n",
+ dstat, cstat);
if (rc) {
channel->state = LCS_CH_STATE_ERROR;
}
@@ -1761,8 +1771,8 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
lcs_schedule_recovery(card);
break;
case LCS_CMD_STOPLAN:
- PRINT_WARN("Stoplan for %s initiated by LGW.\n",
- card->dev->name);
+ pr_warning("Stoplan for %s initiated by LGW.\n",
+ card->dev->name);
if (card->dev)
netif_carrier_off(card->dev);
break;
@@ -1790,7 +1800,8 @@ lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
skb = dev_alloc_skb(skb_len);
if (skb == NULL) {
- PRINT_ERR("LCS: alloc_skb failed for device=%s\n",
+ dev_err(&card->dev->dev,
+ " Allocating a socket buffer to interface %s failed\n",
card->dev->name);
card->stats.rx_dropped++;
return;
@@ -1886,7 +1897,8 @@ lcs_stop_device(struct net_device *dev)
(card->write.state != LCS_CH_STATE_RUNNING));
rc = lcs_stopcard(card);
if (rc)
- PRINT_ERR("Try it again!\n ");
+ dev_err(&card->dev->dev,
+ " Shutting down the LCS device failed\n ");
return rc;
}
@@ -1905,7 +1917,7 @@ lcs_open_device(struct net_device *dev)
/* initialize statistics */
rc = lcs_detect(card);
if (rc) {
- PRINT_ERR("LCS:Error in opening device!\n");
+ pr_err("Error in opening device!\n");
} else {
dev->flags |= IFF_UP;
@@ -2113,8 +2125,9 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
rc = lcs_detect(card);
if (rc) {
LCS_DBF_TEXT(2, setup, "dtctfail");
- PRINT_WARN("Detection of LCS card failed with return code "
- "%d (0x%x)\n", rc, rc);
+ dev_err(&card->dev->dev,
+ "Detecting a network adapter for LCS devices"
+ " failed with rc=%d (0x%x)\n", rc, rc);
lcs_stopcard(card);
goto out;
}
@@ -2144,7 +2157,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
#endif
default:
LCS_DBF_TEXT(3, setup, "errinit");
- PRINT_ERR("LCS: Initialization failed\n");
+ pr_err(" Initialization failed\n");
goto out;
}
if (!dev)
@@ -2176,13 +2189,13 @@ netdev_out:
goto out;
/* Print out supported assists: IPv6 */
- PRINT_INFO("LCS device %s %s IPv6 support\n", card->dev->name,
- (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
- "with" : "without");
+ pr_info("LCS device %s %s IPv6 support\n", card->dev->name,
+ (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
+ "with" : "without");
/* Print out supported assist: Multicast */
- PRINT_INFO("LCS device %s %s Multicast support\n", card->dev->name,
- (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
- "with" : "without");
+ pr_info("LCS device %s %s Multicast support\n", card->dev->name,
+ (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
+ "with" : "without");
return 0;
out:
@@ -2248,15 +2261,16 @@ lcs_recovery(void *ptr)
return 0;
LCS_DBF_TEXT(4, trace, "recover2");
gdev = card->gdev;
- PRINT_WARN("Recovery of device %s started...\n", dev_name(&gdev->dev));
+ dev_warn(&gdev->dev,
+ "A recovery process has been started for the LCS device\n");
rc = __lcs_shutdown_device(gdev, 1);
rc = lcs_new_device(gdev);
if (!rc)
- PRINT_INFO("Device %s successfully recovered!\n",
- card->dev->name);
+ pr_info("Device %s successfully recovered!\n",
+ card->dev->name);
else
- PRINT_INFO("Device %s could not be recovered!\n",
- card->dev->name);
+ pr_info("Device %s could not be recovered!\n",
+ card->dev->name);
lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD);
return 0;
}
@@ -2308,17 +2322,17 @@ __init lcs_init_module(void)
{
int rc;
- PRINT_INFO("Loading %s\n",version);
+ pr_info("Loading %s\n", version);
rc = lcs_register_debug_facility();
LCS_DBF_TEXT(0, setup, "lcsinit");
if (rc) {
- PRINT_ERR("Initialization failed\n");
+ pr_err("Initialization failed\n");
return rc;
}
rc = register_cu3088_discipline(&lcs_group_driver);
if (rc) {
- PRINT_ERR("Initialization failed\n");
+ pr_err("Initialization failed\n");
return rc;
}
return 0;
@@ -2331,7 +2345,7 @@ __init lcs_init_module(void)
static void
__exit lcs_cleanup_module(void)
{
- PRINT_INFO("Terminating lcs module.\n");
+ pr_info("Terminating lcs module.\n");
LCS_DBF_TEXT(0, trace, "cleanup");
unregister_cu3088_discipline(&lcs_group_driver);
lcs_unregister_debug_facility();
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 0fea51e34b57..930e2fc2a011 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -31,6 +31,9 @@
*
*/
+#define KMSG_COMPONENT "netiucv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#undef DEBUG
#include <linux/module.h>
@@ -846,7 +849,8 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
fsm_deltimer(&conn->timer);
iucv_path_sever(conn->path, NULL);
- PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
+ dev_info(privptr->dev, "The peer interface of the IUCV device"
+ " has closed the connection\n");
IUCV_DBF_TEXT(data, 2,
"conn_action_connsever: Remote dropped connection\n");
fsm_newstate(fi, CONN_STATE_STARTWAIT);
@@ -856,13 +860,15 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
static void conn_action_start(fsm_instance *fi, int event, void *arg)
{
struct iucv_connection *conn = arg;
+ struct net_device *netdev = conn->netdev;
+ struct netiucv_priv *privptr = netdev_priv(netdev);
int rc;
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_newstate(fi, CONN_STATE_STARTWAIT);
IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
- conn->netdev->name, conn->userid);
+ netdev->name, conn->userid);
/*
* We must set the state before calling iucv_connect because the
@@ -876,41 +882,45 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
NULL, iucvMagic, conn);
switch (rc) {
case 0:
- conn->netdev->tx_queue_len = conn->path->msglim;
+ netdev->tx_queue_len = conn->path->msglim;
fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
CONN_EVENT_TIMER, conn);
return;
case 11:
- PRINT_INFO("%s: User %s is currently not available.\n",
- conn->netdev->name,
- netiucv_printname(conn->userid));
+ dev_warn(privptr->dev,
+ "The IUCV device failed to connect to z/VM guest %s\n",
+ netiucv_printname(conn->userid));
fsm_newstate(fi, CONN_STATE_STARTWAIT);
break;
case 12:
- PRINT_INFO("%s: User %s is currently not ready.\n",
- conn->netdev->name,
- netiucv_printname(conn->userid));
+ dev_warn(privptr->dev,
+ "The IUCV device failed to connect to the peer on z/VM"
+ " guest %s\n", netiucv_printname(conn->userid));
fsm_newstate(fi, CONN_STATE_STARTWAIT);
break;
case 13:
- PRINT_WARN("%s: Too many IUCV connections.\n",
- conn->netdev->name);
+ dev_err(privptr->dev,
+ "Connecting the IUCV device would exceed the maximum"
+ " number of IUCV connections\n");
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
case 14:
- PRINT_WARN("%s: User %s has too many IUCV connections.\n",
- conn->netdev->name,
- netiucv_printname(conn->userid));
+ dev_err(privptr->dev,
+ "z/VM guest %s has too many IUCV connections"
+ " to connect with the IUCV device\n",
+ netiucv_printname(conn->userid));
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
case 15:
- PRINT_WARN("%s: No IUCV authorization in CP directory.\n",
- conn->netdev->name);
+ dev_err(privptr->dev,
+ "The IUCV device cannot connect to a z/VM guest with no"
+ " IUCV authorization\n");
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
default:
- PRINT_WARN("%s: iucv_connect returned error %d\n",
- conn->netdev->name, rc);
+ dev_err(privptr->dev,
+ "Connecting the IUCV device failed with error %d\n",
+ rc);
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
}
@@ -1059,8 +1069,9 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
switch (fsm_getstate(fi)) {
case DEV_STATE_STARTWAIT:
fsm_newstate(fi, DEV_STATE_RUNNING);
- PRINT_INFO("%s: connected with remote side %s\n",
- dev->name, privptr->conn->userid);
+ dev_info(privptr->dev,
+ "The IUCV device has been connected"
+ " successfully to %s\n", privptr->conn->userid);
IUCV_DBF_TEXT(setup, 3,
"connection is up and running\n");
break;
@@ -1982,6 +1993,8 @@ static ssize_t conn_write(struct device_driver *drv,
if (rc)
goto out_unreg;
+ dev_info(priv->dev, "The IUCV interface to %s has been"
+ " established successfully\n", netiucv_printname(username));
return count;
@@ -2027,10 +2040,9 @@ static ssize_t remove_write (struct device_driver *drv,
continue;
read_unlock_bh(&iucv_connection_rwlock);
if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
- PRINT_WARN("netiucv: net device %s active with peer "
- "%s\n", ndev->name, priv->conn->userid);
- PRINT_WARN("netiucv: %s cannot be removed\n",
- ndev->name);
+ dev_warn(dev, "The IUCV device is connected"
+ " to %s and cannot be removed\n",
+ priv->conn->userid);
IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
return -EPERM;
}
@@ -2062,7 +2074,7 @@ static struct attribute_group *netiucv_drv_attr_groups[] = {
static void netiucv_banner(void)
{
- PRINT_INFO("NETIUCV driver initialized\n");
+ pr_info("driver initialized\n");
}
static void __exit netiucv_exit(void)
@@ -2088,7 +2100,7 @@ static void __exit netiucv_exit(void)
iucv_unregister(&netiucv_handler, 1);
iucv_unregister_dbf_views();
- PRINT_INFO("NETIUCV driver unloaded\n");
+ pr_info("driver unloaded\n");
return;
}
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index af6d60458513..d5ccce1643e4 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -31,11 +31,10 @@
#include <asm/qdio.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
+#include <asm/sysinfo.h>
#include "qeth_core_mpc.h"
-#define KMSG_COMPONENT "qeth"
-
/**
* Debug Facility stuff
*/
@@ -74,11 +73,6 @@ struct qeth_dbf_info {
#define QETH_DBF_TEXT_(name, level, text...) \
qeth_dbf_longtext(QETH_DBF_##name, level, text)
-/**
- * some more debug stuff
- */
-#define PRINTK_HEADER "qeth: "
-
#define SENSE_COMMAND_REJECT_BYTE 0
#define SENSE_COMMAND_REJECT_FLAG 0x80
#define SENSE_RESETTING_EVENT_BYTE 1
@@ -733,6 +727,7 @@ struct qeth_card {
struct qeth_osn_info osn_info;
struct qeth_discipline discipline;
atomic_t force_alloc_skb;
+ struct service_level qeth_service_level;
};
struct qeth_card_list_struct {
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 52d26592c72c..e783644a2105 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -8,6 +8,9 @@
* Frank Blaschka <frank.blaschka@de.ibm.com>
*/
+#define KMSG_COMPONENT "qeth"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
@@ -319,7 +322,10 @@ static int qeth_issue_next_read(struct qeth_card *card)
return -EIO;
iob = qeth_get_buffer(&card->read);
if (!iob) {
- PRINT_WARN("issue_next_read failed: no iob available!\n");
+ dev_warn(&card->gdev->dev, "The qeth device driver "
+ "failed to recover an error on the device\n");
+ QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob "
+ "available\n", dev_name(&card->gdev->dev));
return -ENOMEM;
}
qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
@@ -327,7 +333,8 @@ static int qeth_issue_next_read(struct qeth_card *card)
rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
(addr_t) iob, 0, 0);
if (rc) {
- PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc);
+ QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
+ "rc=%i\n", dev_name(&card->gdev->dev), rc);
atomic_set(&card->read.irq_pending, 0);
qeth_schedule_recovery(card);
wake_up(&card->wait_q);
@@ -393,10 +400,9 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
} else {
switch (cmd->hdr.command) {
case IPA_CMD_STOPLAN:
- PRINT_WARN("Link failure on %s (CHPID 0x%X) - "
- "there is a network problem or "
- "someone pulled the cable or "
- "disabled the port.\n",
+ dev_warn(&card->gdev->dev,
+ "The link for interface %s on CHPID"
+ " 0x%X failed\n",
QETH_CARD_IFNAME(card),
card->info.chpid);
card->lan_online = 0;
@@ -404,9 +410,9 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
netif_carrier_off(card->dev);
return NULL;
case IPA_CMD_STARTLAN:
- PRINT_INFO("Link reestablished on %s "
- "(CHPID 0x%X). Scheduling "
- "IP address reset.\n",
+ dev_info(&card->gdev->dev,
+ "The link for %s on CHPID 0x%X has"
+ " been restored\n",
QETH_CARD_IFNAME(card),
card->info.chpid);
netif_carrier_on(card->dev);
@@ -458,7 +464,7 @@ static int qeth_check_idx_response(unsigned char *buffer)
QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
if ((buffer[2] & 0xc0) == 0xc0) {
- PRINT_WARN("received an IDX TERMINATE "
+ QETH_DBF_MESSAGE(2, "received an IDX TERMINATE "
"with cause code 0x%02x%s\n",
buffer[4],
((buffer[4] == 0x22) ?
@@ -744,8 +750,10 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
QETH_DBF_TEXT(TRACE, 2, "CGENCHK");
- PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ",
- dev_name(&cdev->dev), dstat, cstat);
+ dev_warn(&cdev->dev, "The qeth device driver "
+ "failed to recover an error on the device\n");
+ QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x ",
+ dev_name(&cdev->dev), dstat, cstat);
print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
16, 1, irb, 64, 1);
return 1;
@@ -784,12 +792,14 @@ static long __qeth_check_irb_error(struct ccw_device *cdev,
switch (PTR_ERR(irb)) {
case -EIO:
- PRINT_WARN("i/o-error on device %s\n", dev_name(&cdev->dev));
+ QETH_DBF_MESSAGE(2, "%s i/o-error on device\n",
+ dev_name(&cdev->dev));
QETH_DBF_TEXT(TRACE, 2, "ckirberr");
QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO);
break;
case -ETIMEDOUT:
- PRINT_WARN("timeout on device %s\n", dev_name(&cdev->dev));
+ dev_warn(&cdev->dev, "A hardware operation timed out"
+ " on the device\n");
QETH_DBF_TEXT(TRACE, 2, "ckirberr");
QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT);
if (intparm == QETH_RCD_PARM) {
@@ -802,8 +812,8 @@ static long __qeth_check_irb_error(struct ccw_device *cdev,
}
break;
default:
- PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
- dev_name(&cdev->dev));
+ QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n",
+ dev_name(&cdev->dev), PTR_ERR(irb));
QETH_DBF_TEXT(TRACE, 2, "ckirberr");
QETH_DBF_TEXT(TRACE, 2, " rc???");
}
@@ -869,10 +879,12 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
(dstat & DEV_STAT_UNIT_CHECK) ||
(cstat)) {
if (irb->esw.esw0.erw.cons) {
- /* TODO: we should make this s390dbf */
- PRINT_WARN("sense data available on channel %s.\n",
- CHANNEL_ID(channel));
- PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat);
+ dev_warn(&channel->ccwdev->dev,
+ "The qeth device driver failed to recover "
+ "an error on the device\n");
+ QETH_DBF_MESSAGE(2, "%s sense data available. cstat "
+ "0x%X dstat 0x%X\n",
+ dev_name(&channel->ccwdev->dev), cstat, dstat);
print_hex_dump(KERN_WARNING, "qeth: irb ",
DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
print_hex_dump(KERN_WARNING, "qeth: sense data ",
@@ -1138,6 +1150,14 @@ static int qeth_setup_card(struct qeth_card *card)
return 0;
}
+static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
+{
+ struct qeth_card *card = container_of(slr, struct qeth_card,
+ qeth_service_level);
+ seq_printf(m, "qeth: %s firmware level %s\n", CARD_BUS_ID(card),
+ card->info.mcl_level);
+}
+
static struct qeth_card *qeth_alloc_card(void)
{
struct qeth_card *card;
@@ -1157,6 +1177,8 @@ static struct qeth_card *qeth_alloc_card(void)
return NULL;
}
card->options.layer2 = -1;
+ card->qeth_service_level.seq_print = qeth_core_sl_print;
+ register_service_level(&card->qeth_service_level);
return card;
}
@@ -1175,8 +1197,8 @@ static int qeth_determine_card_type(struct qeth_card *card)
card->qdio.no_out_queues = known_devices[i][8];
card->info.is_multicast_different = known_devices[i][9];
if (qeth_is_1920_device(card)) {
- PRINT_INFO("Priority Queueing not able "
- "due to hardware limitations!\n");
+ dev_info(&card->gdev->dev,
+ "Priority Queueing not supported\n");
card->qdio.no_out_queues = 1;
card->qdio.default_out_queue = 0;
}
@@ -1185,7 +1207,8 @@ static int qeth_determine_card_type(struct qeth_card *card)
i++;
}
card->info.type = QETH_CARD_TYPE_UNKNOWN;
- PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card));
+ dev_err(&card->gdev->dev, "The adapter hardware is of an "
+ "unknown type\n");
return -ENOENT;
}
@@ -1368,8 +1391,8 @@ static int qeth_get_unitaddr(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "getunit");
rc = qeth_read_conf_data(card, (void **) &prcd, &length);
if (rc) {
- PRINT_ERR("qeth_read_conf_data for device %s returned %i\n",
- CARD_DDEV_ID(card), rc);
+ QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
+ dev_name(&card->gdev->dev), rc);
return rc;
}
card->info.chpid = prcd[30];
@@ -1519,7 +1542,10 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
if (rc == -ERESTARTSYS)
return rc;
if (channel->state != CH_STATE_ACTIVATING) {
- PRINT_WARN("IDX activate timed out!\n");
+ dev_warn(&channel->ccwdev->dev, "The qeth device driver"
+ " failed to recover an error on the device\n");
+ QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
+ dev_name(&channel->ccwdev->dev));
QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
qeth_clear_cmd_buffers(channel);
return -ETIME;
@@ -1552,20 +1578,21 @@ static void qeth_idx_write_cb(struct qeth_channel *channel,
if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
- PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
- "adapter exclusively used by another host\n",
- CARD_WDEV_ID(card));
+ dev_err(&card->write.ccwdev->dev,
+ "The adapter is used exclusively by another "
+ "host\n");
else
- PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
- "negative reply\n", CARD_WDEV_ID(card));
+ QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:"
+ " negative reply\n",
+ dev_name(&card->write.ccwdev->dev));
goto out;
}
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
- PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
- "function level mismatch "
- "(sent: 0x%x, received: 0x%x)\n",
- CARD_WDEV_ID(card), card->info.func_level, temp);
+ QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: "
+ "function level mismatch (sent: 0x%x, received: "
+ "0x%x)\n", dev_name(&card->write.ccwdev->dev),
+ card->info.func_level, temp);
goto out;
}
channel->state = CH_STATE_UP;
@@ -1591,12 +1618,13 @@ static void qeth_idx_read_cb(struct qeth_channel *channel,
if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
- PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
- "adapter exclusively used by another host\n",
- CARD_RDEV_ID(card));
+ dev_err(&card->write.ccwdev->dev,
+ "The adapter is used exclusively by another "
+ "host\n");
else
- PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
- "negative reply\n", CARD_RDEV_ID(card));
+ QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
+ " negative reply\n",
+ dev_name(&card->read.ccwdev->dev));
goto out;
}
@@ -1610,9 +1638,10 @@ static void qeth_idx_read_cb(struct qeth_channel *channel,
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if (temp != qeth_peer_func_level(card->info.func_level)) {
- PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
- "level mismatch (sent: 0x%x, received: 0x%x)\n",
- CARD_RDEV_ID(card), card->info.func_level, temp);
+ QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function "
+ "level mismatch (sent: 0x%x, received: 0x%x)\n",
+ dev_name(&card->read.ccwdev->dev),
+ card->info.func_level, temp);
goto out;
}
memcpy(&card->token.issuer_rm_r,
@@ -1686,8 +1715,9 @@ int qeth_send_control_data(struct qeth_card *card, int len,
(addr_t) iob, 0, 0);
spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
if (rc) {
- PRINT_WARN("qeth_send_control_data: "
- "ccw_device_start rc = %i\n", rc);
+ QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
+ "ccw_device_start rc = %i\n",
+ dev_name(&card->write.ccwdev->dev), rc);
QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
spin_lock_irqsave(&card->lock, flags);
list_del_init(&reply->list);
@@ -2170,11 +2200,8 @@ static void qeth_print_status_with_portname(struct qeth_card *card)
dbf_text[i] =
(char) _ebcasc[(__u8) dbf_text[i]];
dbf_text[8] = 0;
- PRINT_INFO("Device %s/%s/%s is a%s card%s%s%s\n"
+ dev_info(&card->gdev->dev, "Device is a%s card%s%s%s\n"
"with link type %s (portname: %s)\n",
- CARD_RDEV_ID(card),
- CARD_WDEV_ID(card),
- CARD_DDEV_ID(card),
qeth_get_cardname(card),
(card->info.mcl_level[0]) ? " (level: " : "",
(card->info.mcl_level[0]) ? card->info.mcl_level : "",
@@ -2187,23 +2214,17 @@ static void qeth_print_status_with_portname(struct qeth_card *card)
static void qeth_print_status_no_portname(struct qeth_card *card)
{
if (card->info.portname[0])
- PRINT_INFO("Device %s/%s/%s is a%s "
+ dev_info(&card->gdev->dev, "Device is a%s "
"card%s%s%s\nwith link type %s "
"(no portname needed by interface).\n",
- CARD_RDEV_ID(card),
- CARD_WDEV_ID(card),
- CARD_DDEV_ID(card),
qeth_get_cardname(card),
(card->info.mcl_level[0]) ? " (level: " : "",
(card->info.mcl_level[0]) ? card->info.mcl_level : "",
(card->info.mcl_level[0]) ? ")" : "",
qeth_get_cardname_short(card));
else
- PRINT_INFO("Device %s/%s/%s is a%s "
+ dev_info(&card->gdev->dev, "Device is a%s "
"card%s%s%s\nwith link type %s.\n",
- CARD_RDEV_ID(card),
- CARD_WDEV_ID(card),
- CARD_DDEV_ID(card),
qeth_get_cardname(card),
(card->info.mcl_level[0]) ? " (level: " : "",
(card->info.mcl_level[0]) ? card->info.mcl_level : "",
@@ -2325,7 +2346,6 @@ static int qeth_init_input_buffer(struct qeth_card *card,
* the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off
* buffers
*/
- BUG_ON(!pool_entry);
buf->pool_entry = pool_entry;
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
@@ -2630,9 +2650,8 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
qeth_get_micros() -
card->perf_stats.inbound_do_qdio_start_time;
if (rc) {
- PRINT_WARN("qeth_queue_input_buffer's do_QDIO "
- "return %i (device %s).\n",
- rc, CARD_DDEV_ID(card));
+ dev_warn(&card->gdev->dev,
+ "QDIO reported an error, rc=%i\n", rc);
QETH_DBF_TEXT(TRACE, 2, "qinberr");
QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
}
@@ -3730,6 +3749,7 @@ static void qeth_core_free_card(struct qeth_card *card)
free_netdev(card->dev);
kfree(card->ip_tbd_list);
qeth_free_qdio_buffers(card);
+ unregister_service_level(&card->qeth_service_level);
kfree(card);
}
@@ -3757,7 +3777,7 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
int qeth_core_hardsetup_card(struct qeth_card *card)
{
- struct qdio_ssqd_desc *qdio_ssqd;
+ struct qdio_ssqd_desc *ssqd;
int retries = 3;
int mpno = 0;
int rc;
@@ -3766,7 +3786,8 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
atomic_set(&card->force_alloc_skb, 0);
retry:
if (retries < 3) {
- PRINT_WARN("Retrying to do IDX activates.\n");
+ QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
+ dev_name(&card->gdev->dev));
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
@@ -3792,9 +3813,16 @@ retry:
return rc;
}
- qdio_ssqd = qdio_get_ssqd_desc(CARD_DDEV(card));
- if (qdio_ssqd)
- mpno = qdio_ssqd->pcnt;
+ ssqd = kmalloc(sizeof(struct qdio_ssqd_desc), GFP_KERNEL);
+ if (!ssqd) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ rc = qdio_get_ssqd_desc(CARD_DDEV(card), ssqd);
+ if (rc == 0)
+ mpno = ssqd->pcnt;
+ kfree(ssqd);
+
if (mpno)
mpno = min(mpno - 1, QETH_MAX_PORTNO);
if (card->info.portno > mpno) {
@@ -3834,7 +3862,10 @@ retry:
}
return 0;
out:
- PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc);
+ dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
+ "an error on the device\n");
+ QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! rc=%d\n",
+ dev_name(&card->gdev->dev), rc);
return rc;
}
EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
@@ -4054,8 +4085,8 @@ int qeth_core_load_discipline(struct qeth_card *card,
break;
}
if (!card->discipline.ccwgdriver) {
- PRINT_ERR("Support for discipline %d not present\n",
- discipline);
+ dev_err(&card->gdev->dev, "There is no kernel module to "
+ "support discipline %d\n", discipline);
rc = -EINVAL;
}
return rc;
@@ -4448,7 +4479,7 @@ static int __init qeth_core_init(void)
{
int rc;
- PRINT_INFO("loading core functions\n");
+ pr_info("loading core functions\n");
INIT_LIST_HEAD(&qeth_core_card_list.list);
rwlock_init(&qeth_core_card_list.rwlock);
@@ -4488,9 +4519,10 @@ driver_err:
ccwgroup_err:
ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
+ QETH_DBF_MESSAGE(2, "Initialization failed with code %d\n", rc);
qeth_unregister_dbf_views();
out_err:
- PRINT_ERR("Initialization failed with code %d\n", rc);
+ pr_err("Initializing the qeth device driver failed\n");
return rc;
}
@@ -4503,7 +4535,7 @@ static void __exit qeth_core_exit(void)
ccw_driver_unregister(&qeth_ccw_driver);
kmem_cache_destroy(qeth_core_header_cache);
qeth_unregister_dbf_views();
- PRINT_INFO("core functions removed\n");
+ pr_info("core functions removed\n");
}
module_init(qeth_core_init);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 1b1e80336d2c..af15bc648ba1 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -8,6 +8,9 @@
* Frank Blaschka <frank.blaschka@de.ibm.com>
*/
+#define KMSG_COMPONENT "qeth"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
@@ -503,12 +506,13 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
OSA_ADDR_LEN);
- PRINT_INFO("MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
- "successfully registered on device %s\n",
- card->dev->dev_addr[0], card->dev->dev_addr[1],
- card->dev->dev_addr[2], card->dev->dev_addr[3],
- card->dev->dev_addr[4], card->dev->dev_addr[5],
- card->dev->name);
+ dev_info(&card->gdev->dev,
+ "MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
+ "successfully registered on device %s\n",
+ card->dev->dev_addr[0], card->dev->dev_addr[1],
+ card->dev->dev_addr[2], card->dev->dev_addr[3],
+ card->dev->dev_addr[4], card->dev->dev_addr[5],
+ card->dev->name);
}
return 0;
}
@@ -1015,9 +1019,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
if (rc == 0xe080) {
- PRINT_WARN("LAN on card %s if offline! "
- "Waiting for STARTLAN from card.\n",
- CARD_BUS_ID(card));
+ dev_warn(&card->gdev->dev,
+ "The LAN is offline\n");
card->lan_online = 0;
}
return rc;
@@ -1117,8 +1120,8 @@ static int qeth_l2_recover(void *ptr)
if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
return 0;
QETH_DBF_TEXT(TRACE, 2, "recover2");
- PRINT_WARN("Recovery of device %s started ...\n",
- CARD_BUS_ID(card));
+ dev_warn(&card->gdev->dev,
+ "A recovery process has been started for the device\n");
card->use_hard_stop = 1;
__qeth_l2_set_offline(card->gdev, 1);
rc = __qeth_l2_set_online(card->gdev, 1);
@@ -1126,27 +1129,27 @@ static int qeth_l2_recover(void *ptr)
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
if (!rc)
- PRINT_INFO("Device %s successfully recovered!\n",
- CARD_BUS_ID(card));
+ dev_info(&card->gdev->dev,
+ "Device successfully recovered!\n");
else {
rtnl_lock();
dev_close(card->dev);
rtnl_unlock();
- PRINT_INFO("Device %s could not be recovered!\n",
- CARD_BUS_ID(card));
+ dev_warn(&card->gdev->dev, "The qeth device driver "
+ "failed to recover an error on the device\n");
}
return 0;
}
static int __init qeth_l2_init(void)
{
- PRINT_INFO("register layer 2 discipline\n");
+ pr_info("register layer 2 discipline\n");
return 0;
}
static void __exit qeth_l2_exit(void)
{
- PRINT_INFO("unregister layer 2 discipline\n");
+ pr_info("unregister layer 2 discipline\n");
}
static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ed59fedd5922..c0b30b25a5f1 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -8,6 +8,9 @@
* Frank Blaschka <frank.blaschka@de.ibm.com>
*/
+#define KMSG_COMPONENT "qeth"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
@@ -917,8 +920,8 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card,
if (rc) {
QETH_DBF_TEXT(TRACE, 2, "FAILED");
qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
- PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n",
- buf, rc, rc);
+ dev_warn(&card->gdev->dev,
+ "Registering IP address %s failed\n", buf);
}
return rc;
}
@@ -1029,24 +1032,22 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "setadprm");
if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
- PRINT_WARN("set adapter parameters not supported "
- "on device %s.\n",
- CARD_BUS_ID(card));
+ dev_info(&card->gdev->dev,
+ "set adapter parameters not supported.\n");
QETH_DBF_TEXT(SETUP, 2, " notsupp");
return 0;
}
rc = qeth_query_setadapterparms(card);
if (rc) {
- PRINT_WARN("couldn't set adapter parameters on device %s: "
- "x%x\n", CARD_BUS_ID(card), rc);
+ QETH_DBF_MESSAGE(2, "%s couldn't set adapter parameters: "
+ "0x%x\n", card->gdev->dev.bus_id, rc);
return rc;
}
if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) {
rc = qeth_setadpparms_change_macaddr(card);
if (rc)
- PRINT_WARN("couldn't get MAC address on "
- "device %s: x%x\n",
- CARD_BUS_ID(card), rc);
+ dev_warn(&card->gdev->dev, "Reading the adapter MAC"
+				" address failed\n");
}
if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
@@ -1160,16 +1161,17 @@ static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
QETH_DBF_TEXT(TRACE, 3, "ipaarp");
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
- PRINT_WARN("ARP processing not supported "
- "on %s!\n", QETH_CARD_IFNAME(card));
+ dev_info(&card->gdev->dev,
+ "ARP processing not supported on %s!\n",
+ QETH_CARD_IFNAME(card));
return 0;
}
rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_START, 0);
if (rc) {
- PRINT_WARN("Could not start ARP processing "
- "assist on %s: 0x%x\n",
- QETH_CARD_IFNAME(card), rc);
+ dev_warn(&card->gdev->dev,
+ "Starting ARP processing support for %s failed\n",
+ QETH_CARD_IFNAME(card));
}
return rc;
}
@@ -1181,19 +1183,21 @@ static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card)
QETH_DBF_TEXT(TRACE, 3, "ipaipfrg");
if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
- PRINT_INFO("Hardware IP fragmentation not supported on %s\n",
- QETH_CARD_IFNAME(card));
+ dev_info(&card->gdev->dev,
+ "Hardware IP fragmentation not supported on %s\n",
+ QETH_CARD_IFNAME(card));
return -EOPNOTSUPP;
}
rc = qeth_l3_send_simple_setassparms(card, IPA_IP_FRAGMENTATION,
IPA_CMD_ASS_START, 0);
if (rc) {
- PRINT_WARN("Could not start Hardware IP fragmentation "
- "assist on %s: 0x%x\n",
- QETH_CARD_IFNAME(card), rc);
+ dev_warn(&card->gdev->dev,
+ "Starting IP fragmentation support for %s failed\n",
+ QETH_CARD_IFNAME(card));
} else
- PRINT_INFO("Hardware IP fragmentation enabled \n");
+ dev_info(&card->gdev->dev,
+			"Hardware IP fragmentation enabled\n");
return rc;
}
@@ -1207,17 +1211,18 @@ static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
return -EOPNOTSUPP;
if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
- PRINT_INFO("Inbound source address not "
- "supported on %s\n", QETH_CARD_IFNAME(card));
+ dev_info(&card->gdev->dev,
+ "Inbound source address not supported on %s\n",
+ QETH_CARD_IFNAME(card));
return -EOPNOTSUPP;
}
rc = qeth_l3_send_simple_setassparms(card, IPA_SOURCE_MAC,
IPA_CMD_ASS_START, 0);
if (rc)
- PRINT_WARN("Could not start inbound source "
- "assist on %s: 0x%x\n",
- QETH_CARD_IFNAME(card), rc);
+ dev_warn(&card->gdev->dev,
+ "Starting proxy ARP support for %s failed\n",
+ QETH_CARD_IFNAME(card));
return rc;
}
@@ -1228,19 +1233,19 @@ static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
QETH_DBF_TEXT(TRACE, 3, "strtvlan");
if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
- PRINT_WARN("VLAN not supported on %s\n",
- QETH_CARD_IFNAME(card));
+ dev_info(&card->gdev->dev,
+ "VLAN not supported on %s\n", QETH_CARD_IFNAME(card));
return -EOPNOTSUPP;
}
rc = qeth_l3_send_simple_setassparms(card, IPA_VLAN_PRIO,
IPA_CMD_ASS_START, 0);
if (rc) {
- PRINT_WARN("Could not start vlan "
- "assist on %s: 0x%x\n",
- QETH_CARD_IFNAME(card), rc);
+ dev_warn(&card->gdev->dev,
+ "Starting VLAN support for %s failed\n",
+ QETH_CARD_IFNAME(card));
} else {
- PRINT_INFO("VLAN enabled \n");
+ dev_info(&card->gdev->dev, "VLAN enabled\n");
}
return rc;
}
@@ -1252,19 +1257,20 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
QETH_DBF_TEXT(TRACE, 3, "stmcast");
if (!qeth_is_supported(card, IPA_MULTICASTING)) {
- PRINT_WARN("Multicast not supported on %s\n",
- QETH_CARD_IFNAME(card));
+ dev_info(&card->gdev->dev,
+ "Multicast not supported on %s\n",
+ QETH_CARD_IFNAME(card));
return -EOPNOTSUPP;
}
rc = qeth_l3_send_simple_setassparms(card, IPA_MULTICASTING,
IPA_CMD_ASS_START, 0);
if (rc) {
- PRINT_WARN("Could not start multicast "
- "assist on %s: rc=%i\n",
- QETH_CARD_IFNAME(card), rc);
+ dev_warn(&card->gdev->dev,
+ "Starting multicast support for %s failed\n",
+ QETH_CARD_IFNAME(card));
} else {
- PRINT_INFO("Multicast enabled\n");
+ dev_info(&card->gdev->dev, "Multicast enabled\n");
card->dev->flags |= IFF_MULTICAST;
}
return rc;
@@ -1315,36 +1321,37 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
rc = qeth_l3_query_ipassists(card, QETH_PROT_IPV6);
if (rc) {
- PRINT_ERR("IPv6 query ipassist failed on %s\n",
- QETH_CARD_IFNAME(card));
+ dev_err(&card->gdev->dev,
+ "Activating IPv6 support for %s failed\n",
+ QETH_CARD_IFNAME(card));
return rc;
}
rc = qeth_l3_send_simple_setassparms(card, IPA_IPV6,
IPA_CMD_ASS_START, 3);
if (rc) {
- PRINT_WARN("IPv6 start assist (version 4) failed "
- "on %s: 0x%x\n",
- QETH_CARD_IFNAME(card), rc);
+ dev_err(&card->gdev->dev,
+ "Activating IPv6 support for %s failed\n",
+ QETH_CARD_IFNAME(card));
return rc;
}
rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6,
IPA_CMD_ASS_START);
if (rc) {
- PRINT_WARN("IPV6 start assist (version 6) failed "
- "on %s: 0x%x\n",
- QETH_CARD_IFNAME(card), rc);
+ dev_err(&card->gdev->dev,
+ "Activating IPv6 support for %s failed\n",
+ QETH_CARD_IFNAME(card));
return rc;
}
rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
IPA_CMD_ASS_START);
if (rc) {
- PRINT_WARN("Could not enable passthrough "
- "on %s: 0x%x\n",
- QETH_CARD_IFNAME(card), rc);
+ dev_warn(&card->gdev->dev,
+ "Enabling the passthrough mode for %s failed\n",
+ QETH_CARD_IFNAME(card));
return rc;
}
out:
- PRINT_INFO("IPV6 enabled \n");
+ dev_info(&card->gdev->dev, "IPV6 enabled\n");
return 0;
}
#endif
@@ -1356,8 +1363,8 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
QETH_DBF_TEXT(TRACE, 3, "strtipv6");
if (!qeth_is_supported(card, IPA_IPV6)) {
- PRINT_WARN("IPv6 not supported on %s\n",
- QETH_CARD_IFNAME(card));
+ dev_info(&card->gdev->dev,
+ "IPv6 not supported on %s\n", QETH_CARD_IFNAME(card));
return 0;
}
#ifdef CONFIG_QETH_IPV6
@@ -1373,34 +1380,35 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
QETH_DBF_TEXT(TRACE, 3, "stbrdcst");
card->info.broadcast_capable = 0;
if (!qeth_is_supported(card, IPA_FILTERING)) {
- PRINT_WARN("Broadcast not supported on %s\n",
- QETH_CARD_IFNAME(card));
+ dev_info(&card->gdev->dev,
+ "Broadcast not supported on %s\n",
+ QETH_CARD_IFNAME(card));
rc = -EOPNOTSUPP;
goto out;
}
rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
IPA_CMD_ASS_START, 0);
if (rc) {
- PRINT_WARN("Could not enable broadcasting filtering "
- "on %s: 0x%x\n",
- QETH_CARD_IFNAME(card), rc);
+ dev_warn(&card->gdev->dev, "Enabling broadcast filtering for "
+ "%s failed\n", QETH_CARD_IFNAME(card));
goto out;
}
rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
IPA_CMD_ASS_CONFIGURE, 1);
if (rc) {
- PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n",
- QETH_CARD_IFNAME(card), rc);
+ dev_warn(&card->gdev->dev,
+ "Setting up broadcast filtering for %s failed\n",
+ QETH_CARD_IFNAME(card));
goto out;
}
card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
- PRINT_INFO("Broadcast enabled \n");
+ dev_info(&card->gdev->dev, "Broadcast enabled\n");
rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING,
IPA_CMD_ASS_ENABLE, 1);
if (rc) {
- PRINT_WARN("Could not set up broadcast echo filtering on "
- "%s: 0x%x\n", QETH_CARD_IFNAME(card), rc);
+ dev_warn(&card->gdev->dev, "Setting up broadcast echo "
+ "filtering for %s failed\n", QETH_CARD_IFNAME(card));
goto out;
}
card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
@@ -1419,18 +1427,18 @@ static int qeth_l3_send_checksum_command(struct qeth_card *card)
rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
IPA_CMD_ASS_START, 0);
if (rc) {
- PRINT_WARN("Starting Inbound HW Checksumming failed on %s: "
- "0x%x,\ncontinuing using Inbound SW Checksumming\n",
- QETH_CARD_IFNAME(card), rc);
+ dev_warn(&card->gdev->dev, "Starting HW checksumming for %s "
+ "failed, using SW checksumming\n",
+ QETH_CARD_IFNAME(card));
return rc;
}
rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
IPA_CMD_ASS_ENABLE,
card->info.csum_mask);
if (rc) {
- PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: "
- "0x%x,\ncontinuing using Inbound SW Checksumming\n",
- QETH_CARD_IFNAME(card), rc);
+ dev_warn(&card->gdev->dev, "Enabling HW checksumming for %s "
+ "failed, using SW checksumming\n",
+ QETH_CARD_IFNAME(card));
return rc;
}
return 0;
@@ -1443,26 +1451,30 @@ static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
QETH_DBF_TEXT(TRACE, 3, "strtcsum");
if (card->options.checksum_type == NO_CHECKSUMMING) {
- PRINT_WARN("Using no checksumming on %s.\n",
- QETH_CARD_IFNAME(card));
+ dev_info(&card->gdev->dev,
+ "Using no checksumming on %s.\n",
+ QETH_CARD_IFNAME(card));
return 0;
}
if (card->options.checksum_type == SW_CHECKSUMMING) {
- PRINT_WARN("Using SW checksumming on %s.\n",
- QETH_CARD_IFNAME(card));
+ dev_info(&card->gdev->dev,
+ "Using SW checksumming on %s.\n",
+ QETH_CARD_IFNAME(card));
return 0;
}
if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
- PRINT_WARN("Inbound HW Checksumming not "
- "supported on %s,\ncontinuing "
- "using Inbound SW Checksumming\n",
- QETH_CARD_IFNAME(card));
+ dev_info(&card->gdev->dev,
+ "Inbound HW Checksumming not "
+ "supported on %s,\ncontinuing "
+ "using Inbound SW Checksumming\n",
+ QETH_CARD_IFNAME(card));
card->options.checksum_type = SW_CHECKSUMMING;
return 0;
}
rc = qeth_l3_send_checksum_command(card);
if (!rc)
- PRINT_INFO("HW Checksumming (inbound) enabled \n");
+ dev_info(&card->gdev->dev,
+ "HW Checksumming (inbound) enabled\n");
return rc;
}
@@ -1474,18 +1486,20 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card)
QETH_DBF_TEXT(TRACE, 3, "sttso");
if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
- PRINT_WARN("Outbound TSO not supported on %s\n",
- QETH_CARD_IFNAME(card));
+ dev_info(&card->gdev->dev,
+ "Outbound TSO not supported on %s\n",
+ QETH_CARD_IFNAME(card));
rc = -EOPNOTSUPP;
} else {
rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
IPA_CMD_ASS_START, 0);
if (rc)
- PRINT_WARN("Could not start outbound TSO "
- "assist on %s: rc=%i\n",
- QETH_CARD_IFNAME(card), rc);
+ dev_warn(&card->gdev->dev, "Starting outbound TCP "
+ "segmentation offload for %s failed\n",
+ QETH_CARD_IFNAME(card));
else
- PRINT_INFO("Outbound TSO enabled\n");
+ dev_info(&card->gdev->dev,
+ "Outbound TSO enabled\n");
}
if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)) {
card->options.large_send = QETH_LARGE_SEND_NO;
@@ -1578,12 +1592,8 @@ static int qeth_l3_get_unique_id_cb(struct qeth_card *card,
else {
card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
UNIQUE_ID_NOT_BY_CARD;
- PRINT_WARN("couldn't get a unique id from the card on device "
- "%s (result=x%x), using default id. ipv6 "
- "autoconfig on other lpars may lead to duplicate "
- "ip addresses. please use manually "
- "configured ones.\n",
- CARD_BUS_ID(card), cmd->hdr.return_code);
+ dev_warn(&card->gdev->dev, "The network adapter failed to "
+ "generate a unique ID\n");
}
return 0;
}
@@ -3086,9 +3096,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
if (rc == 0xe080) {
- PRINT_WARN("LAN on card %s if offline! "
- "Waiting for STARTLAN from card.\n",
- CARD_BUS_ID(card));
+ dev_warn(&card->gdev->dev,
+ "The LAN is offline\n");
card->lan_online = 0;
}
return rc;
@@ -3194,8 +3203,8 @@ static int qeth_l3_recover(void *ptr)
if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
return 0;
QETH_DBF_TEXT(TRACE, 2, "recover2");
- PRINT_WARN("Recovery of device %s started ...\n",
- CARD_BUS_ID(card));
+ dev_warn(&card->gdev->dev,
+ "A recovery process has been started for the device\n");
card->use_hard_stop = 1;
__qeth_l3_set_offline(card->gdev, 1);
rc = __qeth_l3_set_online(card->gdev, 1);
@@ -3203,14 +3212,14 @@ static int qeth_l3_recover(void *ptr)
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
if (!rc)
- PRINT_INFO("Device %s successfully recovered!\n",
- CARD_BUS_ID(card));
+ dev_info(&card->gdev->dev,
+ "Device successfully recovered!\n");
else {
rtnl_lock();
dev_close(card->dev);
rtnl_unlock();
- PRINT_INFO("Device %s could not be recovered!\n",
- CARD_BUS_ID(card));
+ dev_warn(&card->gdev->dev, "The qeth device driver "
+ "failed to recover an error on the device\n");
}
return 0;
}
@@ -3344,7 +3353,7 @@ static int qeth_l3_register_notifiers(void)
return rc;
}
#else
- PRINT_WARN("layer 3 discipline no IPv6 support\n");
+ pr_warning("There is no IPv6 support for the layer 3 discipline\n");
#endif
return 0;
}
@@ -3363,7 +3372,7 @@ static int __init qeth_l3_init(void)
{
int rc = 0;
- PRINT_INFO("register layer 3 discipline\n");
+ pr_info("register layer 3 discipline\n");
rc = qeth_l3_register_notifiers();
return rc;
}
@@ -3371,7 +3380,7 @@ static int __init qeth_l3_init(void)
static void __exit qeth_l3_exit(void)
{
qeth_l3_unregister_notifiers();
- PRINT_INFO("unregister layer 3 discipline\n");
+ pr_info("unregister layer 3 discipline\n");
}
module_init(qeth_l3_init);
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 3d4e3e3f3fc0..e529b55b3ce9 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -25,9 +25,15 @@
* Sven Schuetz
*/
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/miscdevice.h>
+#include <linux/seq_file.h>
#include "zfcp_ext.h"
+#define ZFCP_BUS_ID_SIZE 20
+
static char *device;
MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com");
@@ -83,9 +89,9 @@ static int __init zfcp_device_setup(char *devstr)
strcpy(str, devstr);
token = strsep(&str, ",");
- if (!token || strlen(token) >= BUS_ID_SIZE)
+ if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
goto err_out;
- strncpy(zfcp_data.init_busid, token, BUS_ID_SIZE);
+ strncpy(zfcp_data.init_busid, token, ZFCP_BUS_ID_SIZE);
token = strsep(&str, ",");
if (!token || strict_strtoull(token, 0,
@@ -102,7 +108,7 @@ static int __init zfcp_device_setup(char *devstr)
err_out:
kfree(str);
- pr_err("zfcp: %s is not a valid SCSI device\n", devstr);
+ pr_err("%s is not a valid SCSI device\n", devstr);
return 0;
}
@@ -186,13 +192,13 @@ static int __init zfcp_module_init(void)
retval = misc_register(&zfcp_cfdc_misc);
if (retval) {
- pr_err("zfcp: Registering the misc device zfcp_cfdc failed\n");
+ pr_err("Registering the misc device zfcp_cfdc failed\n");
goto out_misc;
}
retval = zfcp_ccw_register();
if (retval) {
- pr_err("zfcp: The zfcp device driver could not register with "
+ pr_err("The zfcp device driver could not register with "
"the common I/O layer\n");
goto out_ccw_register;
}
@@ -436,6 +442,16 @@ static void _zfcp_status_read_scheduler(struct work_struct *work)
stat_work));
}
+static void zfcp_print_sl(struct seq_file *m, struct service_level *sl)
+{
+ struct zfcp_adapter *adapter =
+ container_of(sl, struct zfcp_adapter, service_level);
+
+ seq_printf(m, "zfcp: %s microcode level %x\n",
+ dev_name(&adapter->ccw_device->dev),
+ adapter->fsf_lic_version);
+}
+
/**
* zfcp_adapter_enqueue - enqueue a new adapter to the list
* @ccw_device: pointer to the struct cc_device
@@ -500,6 +516,8 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later);
+ adapter->service_level.seq_print = zfcp_print_sl;
+
/* mark adapter unusable as long as sysfs registration is not complete */
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 951a8d409d1d..728147131e1d 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -6,6 +6,9 @@
* Copyright IBM Corporation 2002, 2008
*/
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include "zfcp_ext.h"
/**
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index ec2abceca6dc..f1a7518e67ed 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -7,6 +7,9 @@
* Copyright IBM Corporation 2008
*/
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <asm/ccwdev.h>
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 31012d58cfb7..735d675623f8 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -6,6 +6,9 @@
* Copyright IBM Corporation 2002, 2008
*/
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/ctype.h>
#include <asm/debug.h>
#include "zfcp_ext.h"
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 9ce4c75bd190..e19e46ae4a68 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -33,6 +33,7 @@
#include <asm/qdio.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
+#include <asm/sysinfo.h>
#include "zfcp_dbf.h"
#include "zfcp_fsf.h"
@@ -515,6 +516,7 @@ struct zfcp_adapter {
struct fsf_qtcb_bottom_port *stats_reset_data;
unsigned long stats_reset;
struct work_struct scan_work;
+ struct service_level service_level;
atomic_t qdio_outb_full; /* queue full incidents */
};
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index c557ba34e1aa..4ed4950d994b 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -6,6 +6,9 @@
* Copyright IBM Corporation 2002, 2008
*/
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include "zfcp_ext.h"
#define ZFCP_MAX_ERPS 3
@@ -1281,10 +1284,13 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
break;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
- if (result != ZFCP_ERP_SUCCEEDED)
+ if (result != ZFCP_ERP_SUCCEEDED) {
+ unregister_service_level(&adapter->service_level);
zfcp_erp_rports_del(adapter);
- else
+ } else {
+ register_service_level(&adapter->service_level);
schedule_work(&adapter->scan_work);
+ }
zfcp_adapter_put(adapter);
break;
}
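For illustration only (not part of this patch): the service level interface added in drivers/s390/sysinfo.c further down is consumed by embedding a struct service_level, supplying a seq_print callback, and calling register_service_level()/unregister_service_level(), as qeth does at card allocation and zfcp does in the error-recovery path above. A hedged sketch, assuming the declarations live in <asm/sysinfo.h> (both driver headers include it); the demo_adapter structure and its fields are made up:

#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <asm/sysinfo.h>

struct demo_adapter {
	char fw_level[16];			/* hypothetical firmware level string */
	struct service_level service_level;
};

static void demo_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct demo_adapter *adapter =
		container_of(slr, struct demo_adapter, service_level);

	/* called once per registered entry when the service level list is dumped */
	seq_printf(m, "demo: firmware level %s\n", adapter->fw_level);
}

static int demo_adapter_setup(struct demo_adapter *adapter)
{
	adapter->service_level.seq_print = demo_sl_print;
	return register_service_level(&adapter->service_level);
}

static void demo_adapter_teardown(struct demo_adapter *adapter)
{
	unregister_service_level(&adapter->service_level);
}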
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 8aab3091a7b1..f009f2a7ec3e 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -6,6 +6,9 @@
* Copyright IBM Corporation 2008
*/
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include "zfcp_ext.h"
struct ct_iu_gpn_ft_req {
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index dc0367690405..9c72e083559d 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -6,6 +6,9 @@
* Copyright IBM Corporation 2002, 2008
*/
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/blktrace_api.h>
#include "zfcp_ext.h"
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 664752f90b20..d3b55fb66f13 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -6,6 +6,9 @@
* Copyright IBM Corporation 2002, 2008
*/
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include "zfcp_ext.h"
/* FIXME(tune): free space should be one max. SBAL chain plus what? */
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 468c880f8b6d..9dc42a68fbdd 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -6,6 +6,9 @@
* Copyright IBM Corporation 2002, 2008
*/
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include "zfcp_ext.h"
#include <asm/atomic.h>
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index ca9293ba1766..899af2b45b1e 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -6,6 +6,9 @@
* Copyright IBM Corporation 2008
*/
+#define KMSG_COMPONENT "zfcp"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include "zfcp_ext.h"
#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index c3e4ab07b9cc..0eea90781385 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -1,17 +1,21 @@
/*
* drivers/s390/sysinfo.c
*
- * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Ulrich Weigand (Ulrich.Weigand@de.ibm.com)
+ * Copyright IBM Corp. 2001, 2008
+ * Author(s): Ulrich Weigand (Ulrich.Weigand@de.ibm.com)
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
+#include <asm/cpcmd.h>
/* Sigh, math-emu. Don't ask. */
#include <asm/sfp-util.h>
@@ -271,6 +275,125 @@ static __init int create_proc_sysinfo(void)
__initcall(create_proc_sysinfo);
+/*
+ * Service levels interface.
+ */
+
+static DECLARE_RWSEM(service_level_sem);
+static LIST_HEAD(service_level_list);
+
+int register_service_level(struct service_level *slr)
+{
+ struct service_level *ptr;
+
+ down_write(&service_level_sem);
+ list_for_each_entry(ptr, &service_level_list, list)
+ if (ptr == slr) {
+ up_write(&service_level_sem);
+ return -EEXIST;
+ }
+ list_add_tail(&slr->list, &service_level_list);
+ up_write(&service_level_sem);
+ return 0;
+}
+EXPORT_SYMBOL(register_service_level);
+
+int unregister_service_level(struct service_level *slr)
+{
+ struct service_level *ptr, *next;
+ int rc = -ENOENT;
+
+ down_write(&service_level_sem);
+ list_for_each_entry_safe(ptr, next, &service_level_list, list) {
+ if (ptr != slr)
+ continue;
+ list_del(&ptr->list);
+ rc = 0;
+ break;
+ }
+ up_write(&service_level_sem);
+ return rc;
+}
+EXPORT_SYMBOL(unregister_service_level);
+
+static void *service_level_start(struct seq_file *m, loff_t *pos)
+{
+ down_read(&service_level_sem);
+ return seq_list_start(&service_level_list, *pos);
+}
+
+static void *service_level_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ return seq_list_next(p, &service_level_list, pos);
+}
+
+static void service_level_stop(struct seq_file *m, void *p)
+{
+ up_read(&service_level_sem);
+}
+
+static int service_level_show(struct seq_file *m, void *p)
+{
+ struct service_level *slr;
+
+ slr = list_entry(p, struct service_level, list);
+ slr->seq_print(m, slr);
+ return 0;
+}
+
+static const struct seq_operations service_level_seq_ops = {
+ .start = service_level_start,
+ .next = service_level_next,
+ .stop = service_level_stop,
+ .show = service_level_show
+};
+
+static int service_level_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &service_level_seq_ops);
+}
+
+static const struct file_operations service_level_ops = {
+ .open = service_level_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release
+};
+
+static void service_level_vm_print(struct seq_file *m,
+ struct service_level *slr)
+{
+ char *query_buffer, *str;
+
+ query_buffer = kmalloc(1024, GFP_KERNEL | GFP_DMA);
+ if (!query_buffer)
+ return;
+ cpcmd("QUERY CPLEVEL", query_buffer, 1024, NULL);
+ str = strchr(query_buffer, '\n');
+ if (str)
+ *str = 0;
+ seq_printf(m, "VM: %s\n", query_buffer);
+ kfree(query_buffer);
+}
+
+static struct service_level service_level_vm = {
+ .seq_print = service_level_vm_print
+};
+
+static __init int create_proc_service_level(void)
+{
+ proc_create("service_levels", 0, NULL, &service_level_ops);
+ if (MACHINE_IS_VM)
+ register_service_level(&service_level_vm);
+ return 0;
+}
+
+subsys_initcall(create_proc_service_level);
+
+/*
+ * Bogomips calculation based on cpu capability.
+ */
+
int get_cpu_capability(unsigned int *capability)
{
struct sysinfo_1_2_2 *info;
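
For reference, a minimal sketch (not part of the patch) of how a driver would plug into the new /proc/service_levels interface added above. It assumes that struct service_level and the register_service_level()/unregister_service_level() prototypes are exported through <asm/sysinfo.h>, as the zfcp_def.h hunk earlier suggests; the example module and its firmware level value are hypothetical.

/* Hypothetical consumer of the service level interface sketched above. */
#include <linux/module.h>
#include <linux/seq_file.h>
#include <asm/sysinfo.h>

static void example_sl_print(struct seq_file *m, struct service_level *sl)
{
	/* Each registered entry contributes one line to /proc/service_levels. */
	seq_printf(m, "example: firmware level %x\n", 0x42);
}

static struct service_level example_sl = {
	.seq_print = example_sl_print,
};

static int __init example_sl_init(void)
{
	/* register_service_level() returns -EEXIST for duplicate entries. */
	return register_service_level(&example_sl);
}

static void __exit example_sl_exit(void)
{
	unregister_service_level(&example_sl);
}

module_init(example_sl_init);
module_exit(example_sl_exit);
MODULE_LICENSE("GPL");

With such a module loaded on a z/VM guest, reading /proc/service_levels would show the example line next to the "VM: ..." entry produced by service_level_vm_print().
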
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index f458c1217c5e..c41fa2af7677 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -949,7 +949,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
set_binfmt(&elf_format);
#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
- retval = arch_setup_additional_pages(bprm, executable_stack);
+ retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
if (retval < 0) {
send_sig(SIGKILL, current, 0);
goto out;
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 18546d8eb78e..36fa286adad5 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -49,7 +49,7 @@
 /* memmap is virtually contiguous. */
#define __pfn_to_page(pfn) (vmemmap + (pfn))
-#define __page_to_pfn(page) ((page) - vmemmap)
+#define __page_to_pfn(page) (unsigned long)((page) - vmemmap)
#elif defined(CONFIG_SPARSEMEM)
/*
diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h
index fd70adbb3566..5e310c8d8e2f 100644
--- a/include/net/iucv/iucv.h
+++ b/include/net/iucv/iucv.h
@@ -337,12 +337,35 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
* established paths. This function will deal with RMDATA messages
* embedded in struct iucv_message as well.
*
+ * Locking: local_bh_enable/local_bh_disable
+ *
* Returns the result from the CP IUCV call.
*/
int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
u8 flags, void *buffer, size_t size, size_t *residual);
/**
+ * __iucv_message_receive
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @flags: flags that affect how the message is received (IUCV_IPBUFLST)
+ * @buffer: address of data buffer or address of struct iucv_array
+ * @size: length of data buffer
+ * @residual:
+ *
+ * This function receives messages that are being sent to you over
+ * established paths. This function will deal with RMDATA messages
+ * embedded in struct iucv_message as well.
+ *
+ * Locking: no locking.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, void *buffer, size_t size,
+ size_t *residual);
+
+/**
* iucv_message_reject
* @path: address of iucv path structure
* @msg: address of iucv msg structure
@@ -386,12 +409,34 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
* transmitted is in a buffer and this is a one-way message and the
* receiver will not reply to the message.
*
+ * Locking: local_bh_enable/local_bh_disable
+ *
* Returns the result from the CP IUCV call.
*/
int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
u8 flags, u32 srccls, void *buffer, size_t size);
/**
+ * __iucv_message_send
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
+ * @srccls: source class of message
+ * @buffer: address of data buffer or address of struct iucv_array
+ * @size: length of send buffer
+ *
+ * This function transmits data to another application. Data to be
+ * transmitted is in a buffer and this is a one-way message and the
+ * receiver will not reply to the message.
+ *
+ * Locking: no locking.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, u32 srccls, void *buffer, size_t size);
+
+/**
* iucv_message_send2way
* @path: address of iucv path structure
* @msg: address of iucv msg structure
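
The double-underscore variants declared above exist so that callers which already run with softirqs disabled can skip the local_bh_disable()/local_bh_enable() pair done by iucv_message_send() and iucv_message_receive(). A minimal, hypothetical sketch of the send side; path setup and error handling are elided:

#include <linux/string.h>
#include <net/iucv/iucv.h>

/* Hypothetical helper, assumed to be called from softirq context. */
static int example_send_nobh(struct iucv_path *path, void *data, size_t len)
{
	struct iucv_message txmsg;

	memset(&txmsg, 0, sizeof(txmsg));
	/*
	 * Softirqs are already off in this context, so the lock-less variant
	 * is used; iucv_message_send() would only add a redundant
	 * local_bh_disable()/local_bh_enable() round trip here.
	 */
	return __iucv_message_send(path, &txmsg, 0, 0, data, len);
}
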
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 29f7baa25110..af3192d2a5a3 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -8,6 +8,9 @@
* Author(s): Jennifer Hunt <jenhunt@us.ibm.com>
*/
+#define KMSG_COMPONENT "af_iucv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
@@ -616,6 +619,8 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct iucv_sock *iucv = iucv_sk(sk);
struct sk_buff *skb;
struct iucv_message txmsg;
+ char user_id[9];
+ char appl_id[9];
int err;
err = sock_error(sk);
@@ -651,8 +656,15 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
err = iucv_message_send(iucv->path, &txmsg, 0, 0,
(void *) skb->data, skb->len);
if (err) {
- if (err == 3)
- printk(KERN_ERR "AF_IUCV msg limit exceeded\n");
+ if (err == 3) {
+ user_id[8] = 0;
+ memcpy(user_id, iucv->dst_user_id, 8);
+ appl_id[8] = 0;
+ memcpy(appl_id, iucv->dst_name, 8);
+ pr_err("Application %s on z/VM guest %s"
+ " exceeds message limit\n",
+ appl_id, user_id);
+ }
skb_unlink(skb, &iucv->send_skb_q);
err = -EPIPE;
goto fail;
@@ -1190,7 +1202,8 @@ static int __init afiucv_init(void)
int err;
if (!MACHINE_IS_VM) {
- printk(KERN_ERR "AF_IUCV connection needs VM as base\n");
+ pr_err("The af_iucv module cannot be loaded"
+ " without z/VM\n");
err = -EPROTONOSUPPORT;
goto out;
}
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index d7b54b5bfa69..8f57d4f4328a 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -30,6 +30,9 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define KMSG_COMPONENT "iucv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
@@ -424,8 +427,8 @@ static void iucv_declare_cpu(void *data)
err = "Paging or storage error";
break;
}
- printk(KERN_WARNING "iucv_register: iucv_declare_buffer "
- "on cpu %i returned error 0x%02x (%s)\n", cpu, rc, err);
+ pr_warning("Defining an interrupt buffer on CPU %i"
+ " failed with 0x%02x (%s)\n", cpu, rc, err);
return;
}
@@ -957,7 +960,52 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
EXPORT_SYMBOL(iucv_message_purge);
/**
- * iucv_message_receive
+ * iucv_message_receive_iprmdata
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @flags: how the message is received (IUCV_IPBUFLST)
+ * @buffer: address of data buffer or address of struct iucv_array
+ * @size: length of data buffer
+ * @residual:
+ *
+ * Internal function used by iucv_message_receive and __iucv_message_receive
+ * to receive RMDATA data stored in struct iucv_message.
+ */
+static int iucv_message_receive_iprmdata(struct iucv_path *path,
+ struct iucv_message *msg,
+ u8 flags, void *buffer,
+ size_t size, size_t *residual)
+{
+ struct iucv_array *array;
+ u8 *rmmsg;
+ size_t copy;
+
+ /*
+ * Message is 8 bytes long and has been stored to the
+ * message descriptor itself.
+ */
+ if (residual)
+ *residual = abs(size - 8);
+ rmmsg = msg->rmmsg;
+ if (flags & IUCV_IPBUFLST) {
+ /* Copy to struct iucv_array. */
+ size = (size < 8) ? size : 8;
+ for (array = buffer; size > 0; array++) {
+ copy = min_t(size_t, size, array->length);
+ memcpy((u8 *)(addr_t) array->address,
+ rmmsg, copy);
+ rmmsg += copy;
+ size -= copy;
+ }
+ } else {
+ /* Copy to direct buffer. */
+ memcpy(buffer, rmmsg, min_t(size_t, size, 8));
+ }
+ return 0;
+}
+
+/**
+ * __iucv_message_receive
* @path: address of iucv path structure
* @msg: address of iucv msg structure
* @flags: how the message is received (IUCV_IPBUFLST)
@@ -969,44 +1017,19 @@ EXPORT_SYMBOL(iucv_message_purge);
* established paths. This function will deal with RMDATA messages
* embedded in struct iucv_message as well.
*
+ * Locking: no locking
+ *
* Returns the result from the CP IUCV call.
*/
-int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
- u8 flags, void *buffer, size_t size, size_t *residual)
+int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, void *buffer, size_t size, size_t *residual)
{
union iucv_param *parm;
- struct iucv_array *array;
- u8 *rmmsg;
- size_t copy;
int rc;
- if (msg->flags & IUCV_IPRMDATA) {
- /*
- * Message is 8 bytes long and has been stored to the
- * message descriptor itself.
- */
- rc = (size < 8) ? 5 : 0;
- if (residual)
- *residual = abs(size - 8);
- rmmsg = msg->rmmsg;
- if (flags & IUCV_IPBUFLST) {
- /* Copy to struct iucv_array. */
- size = (size < 8) ? size : 8;
- for (array = buffer; size > 0; array++) {
- copy = min_t(size_t, size, array->length);
- memcpy((u8 *)(addr_t) array->address,
- rmmsg, copy);
- rmmsg += copy;
- size -= copy;
- }
- } else {
- /* Copy to direct buffer. */
- memcpy(buffer, rmmsg, min_t(size_t, size, 8));
- }
- return 0;
- }
-
- local_bh_disable();
+ if (msg->flags & IUCV_IPRMDATA)
+ return iucv_message_receive_iprmdata(path, msg, flags,
+ buffer, size, residual);
parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
parm->db.ipbfadr1 = (u32)(addr_t) buffer;
@@ -1022,6 +1045,37 @@ int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
if (residual)
*residual = parm->db.ipbfln1f;
}
+ return rc;
+}
+EXPORT_SYMBOL(__iucv_message_receive);
+
+/**
+ * iucv_message_receive
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @flags: how the message is received (IUCV_IPBUFLST)
+ * @buffer: address of data buffer or address of struct iucv_array
+ * @size: length of data buffer
+ * @residual:
+ *
+ * This function receives messages that are being sent to you over
+ * established paths. This function will deal with RMDATA messages
+ * embedded in struct iucv_message as well.
+ *
+ * Locking: local_bh_enable/local_bh_disable
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, void *buffer, size_t size, size_t *residual)
+{
+ int rc;
+
+ if (msg->flags & IUCV_IPRMDATA)
+ return iucv_message_receive_iprmdata(path, msg, flags,
+ buffer, size, residual);
+ local_bh_disable();
+ rc = __iucv_message_receive(path, msg, flags, buffer, size, residual);
local_bh_enable();
return rc;
}
@@ -1101,7 +1155,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
EXPORT_SYMBOL(iucv_message_reply);
/**
- * iucv_message_send
+ * __iucv_message_send
* @path: address of iucv path structure
* @msg: address of iucv msg structure
* @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
@@ -1113,15 +1167,16 @@ EXPORT_SYMBOL(iucv_message_reply);
* transmitted is in a buffer and this is a one-way message and the
* receiver will not reply to the message.
*
+ * Locking: no locking
+ *
* Returns the result from the CP IUCV call.
*/
-int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
+int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
u8 flags, u32 srccls, void *buffer, size_t size)
{
union iucv_param *parm;
int rc;
- local_bh_disable();
parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
if (flags & IUCV_IPRMDATA) {
@@ -1144,6 +1199,34 @@ int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
rc = iucv_call_b2f0(IUCV_SEND, parm);
if (!rc)
msg->id = parm->db.ipmsgid;
+ return rc;
+}
+EXPORT_SYMBOL(__iucv_message_send);
+
+/**
+ * iucv_message_send
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
+ * @srccls: source class of message
+ * @buffer: address of send buffer or address of struct iucv_array
+ * @size: length of send buffer
+ *
+ * This function transmits data to another application. Data to be
+ * transmitted is in a buffer and this is a one-way message and the
+ * receiver will not reply to the message.
+ *
+ * Locking: local_bh_enable/local_bh_disable
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, u32 srccls, void *buffer, size_t size)
+{
+ int rc;
+
+ local_bh_disable();
+ rc = __iucv_message_send(path, msg, flags, srccls, buffer, size);
local_bh_enable();
return rc;
}
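
The receive side is split the same way: iucv_message_receive() is now a thin wrapper that brackets __iucv_message_receive() with local_bh_disable()/local_bh_enable(), while callbacks that the IUCV core already drives from softirq context can call the lock-less variant directly. A hypothetical sketch of such a callback; the buffer handling is deliberately simplified:

#include <linux/kernel.h>
#include <net/iucv/iucv.h>

/* Hypothetical message_pending handler for a struct iucv_handler. */
static void example_message_pending(struct iucv_path *path,
				    struct iucv_message *msg)
{
	char buf[64];
	size_t residual = 0;
	int rc;

	/* Runs in softirq context, so no extra local_bh_disable() is needed. */
	rc = __iucv_message_receive(path, msg, 0, buf,
				    min_t(size_t, sizeof(buf), msg->length),
				    &residual);
	if (rc)
		pr_warning("example: receive failed with rc %d\n", rc);
}
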
@@ -1572,7 +1655,7 @@ static void iucv_external_interrupt(u16 code)
BUG_ON(p->iptype < 0x01 || p->iptype > 0x09);
work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC);
if (!work) {
- printk(KERN_WARNING "iucv_external_interrupt: out of memory\n");
+ pr_warning("iucv_external_interrupt: out of memory\n");
return;
}
memcpy(&work->data, p, sizeof(work->data));