Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/boot/Makefile           |   2
-rw-r--r--  arch/s390/include/asm/atomic.h    |   2
-rw-r--r--  arch/s390/include/asm/bitops.h    |  65
-rw-r--r--  arch/s390/include/asm/ccwdev.h    |   4
-rw-r--r--  arch/s390/include/asm/ccwgroup.h  |   4
-rw-r--r--  arch/s390/include/asm/cio.h       |   2
-rw-r--r--  arch/s390/include/asm/cmpxchg.h   | 225
-rw-r--r--  arch/s390/include/asm/system.h    | 196
-rw-r--r--  arch/s390/include/asm/types.h     |   8
-rw-r--r--  arch/s390/include/asm/unistd.h    |   6
-rw-r--r--  arch/s390/kernel/compat_wrapper.S |  27
-rw-r--r--  arch/s390/kernel/early.c          |  22
-rw-r--r--  arch/s390/kernel/head.S           |   2
-rw-r--r--  arch/s390/kernel/reipl64.S        |   2
-rw-r--r--  arch/s390/kernel/setup.c          |  90
-rw-r--r--  arch/s390/kernel/switch_cpu.S     |   4
-rw-r--r--  arch/s390/kernel/switch_cpu64.S   |   4
-rw-r--r--  arch/s390/kernel/syscalls.S       |   4
-rw-r--r--  arch/s390/kernel/time.c           |   6
-rw-r--r--  arch/s390/kernel/vdso.c           |   6
-rw-r--r--  arch/s390/kernel/vtime.c          |   2
-rw-r--r--  arch/s390/kvm/Makefile            |   2
-rw-r--r--  arch/s390/kvm/kvm-s390.c          |   2
-rw-r--r--  arch/s390/kvm/priv.c              |   2
-rw-r--r--  arch/s390/math-emu/Makefile       |   2
-rw-r--r--  arch/s390/mm/fault.c              |   2
-rw-r--r--  arch/s390/oprofile/Makefile       |   3
-rw-r--r--  arch/s390/oprofile/hwsampler.c    |   6
-rw-r--r--  arch/s390/oprofile/init.c         |  15
29 files changed, 403 insertions, 314 deletions
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index 8800cf090694..635d677d3281 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -6,7 +6,7 @@ COMPILE_VERSION := __linux_compile_version_id__`hostname | \
tr -c '[0-9A-Za-z]' '_'`__`date | \
tr -c '[0-9A-Za-z]' '_'`_t
-EXTRA_CFLAGS := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I.
+ccflags-y := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I.
targets := image
targets += bzImage
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 5c5ba10384c2..d9db13810d15 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -9,7 +9,7 @@
*
* Atomic operations that C can't guarantee us.
* Useful for resource counting etc.
- * s390 uses 'Compare And Swap' for atomicity in SMP enviroment.
+ * s390 uses 'Compare And Swap' for atomicity in SMP environment.
*
*/
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 2e05972c5085..e1c8f3a49884 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -742,18 +742,42 @@ static inline int sched_find_first_bit(unsigned long *b)
* 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
*/
-#define ext2_set_bit(nr, addr) \
- __test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-#define ext2_set_bit_atomic(lock, nr, addr) \
- test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-#define ext2_clear_bit(nr, addr) \
- __test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-#define ext2_clear_bit_atomic(lock, nr, addr) \
- test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-#define ext2_test_bit(nr, addr) \
- test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-
-static inline int ext2_find_first_zero_bit(void *vaddr, unsigned int size)
+static inline void __set_bit_le(unsigned long nr, void *addr)
+{
+ __set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline void __clear_bit_le(unsigned long nr, void *addr)
+{
+ __clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline int __test_and_set_bit_le(unsigned long nr, void *addr)
+{
+ return __test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline int test_and_set_bit_le(unsigned long nr, void *addr)
+{
+ return test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline int __test_and_clear_bit_le(unsigned long nr, void *addr)
+{
+ return __test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline int test_and_clear_bit_le(unsigned long nr, void *addr)
+{
+ return test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline int test_bit_le(unsigned long nr, const void *addr)
+{
+ return test_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
+}
+
+static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
{
unsigned long bytes, bits;
@@ -764,7 +788,7 @@ static inline int ext2_find_first_zero_bit(void *vaddr, unsigned int size)
return (bits < size) ? bits : size;
}
-static inline int ext2_find_next_zero_bit(void *vaddr, unsigned long size,
+static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
unsigned long offset)
{
unsigned long *addr = vaddr, *p;
@@ -790,11 +814,10 @@ static inline int ext2_find_next_zero_bit(void *vaddr, unsigned long size,
size -= __BITOPS_WORDSIZE;
p++;
}
- return offset + ext2_find_first_zero_bit(p, size);
+ return offset + find_first_zero_bit_le(p, size);
}
-static inline unsigned long ext2_find_first_bit(void *vaddr,
- unsigned long size)
+static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
{
unsigned long bytes, bits;
@@ -805,7 +828,7 @@ static inline unsigned long ext2_find_first_bit(void *vaddr,
return (bits < size) ? bits : size;
}
-static inline int ext2_find_next_bit(void *vaddr, unsigned long size,
+static inline int find_next_bit_le(void *vaddr, unsigned long size,
unsigned long offset)
{
unsigned long *addr = vaddr, *p;
@@ -831,10 +854,14 @@ static inline int ext2_find_next_bit(void *vaddr, unsigned long size,
size -= __BITOPS_WORDSIZE;
p++;
}
- return offset + ext2_find_first_bit(p, size);
+ return offset + find_first_bit_le(p, size);
}
-#include <asm-generic/bitops/minix.h>
+#define ext2_set_bit_atomic(lock, nr, addr) \
+ test_and_set_bit_le(nr, addr)
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+ test_and_clear_bit_le(nr, addr)
+
#endif /* __KERNEL__ */
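
For orientation, a minimal user-space sketch (not kernel code, buildable on its own) of the nr ^ (__BITOPS_WORDSIZE - 8) transform that the new *_le() helpers above rely on; WORDSIZE_BITS stands in for __BITOPS_WORDSIZE and is assumed to be 64 here:

/*
 * The XOR with (wordsize - 8) keeps the low three bits of the bit number
 * (the offset within a byte) and inverts the byte index within the word,
 * mapping little-endian bitmap numbering onto s390's big-endian words.
 */
#include <stdio.h>

#define WORDSIZE_BITS 64	/* assumed value of __BITOPS_WORDSIZE */

static unsigned long le_bit_to_word_bit(unsigned long nr)
{
	return nr ^ (WORDSIZE_BITS - 8);
}

int main(void)
{
	unsigned long nr;

	for (nr = 0; nr < 16; nr++)
		printf("le bit %2lu -> word bit %2lu\n", nr, le_bit_to_word_bit(nr));
	return 0;
}
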
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index ff6f62e0ec3e..623f2fb71774 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -112,7 +112,6 @@ enum uc_todo {
/**
* struct ccw driver - device driver for channel attached devices
- * @owner: owning module
* @ids: ids supported by this driver
* @probe: function called on probe
* @remove: function called on remove
@@ -128,10 +127,8 @@ enum uc_todo {
* @restore: callback for restoring after hibernation
* @uc_handler: callback for unit check handler
* @driver: embedded device driver structure
- * @name: device driver name
*/
struct ccw_driver {
- struct module *owner;
struct ccw_device_id *ids;
int (*probe) (struct ccw_device *);
void (*remove) (struct ccw_device *);
@@ -147,7 +144,6 @@ struct ccw_driver {
int (*restore)(struct ccw_device *);
enum uc_todo (*uc_handler) (struct ccw_device *, struct irb *);
struct device_driver driver;
- char *name;
};
extern struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index c79c1e787b86..f2ea2c56a7e1 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -29,8 +29,6 @@ struct ccwgroup_device {
/**
* struct ccwgroup_driver - driver for ccw group devices
- * @owner: driver owner
- * @name: driver name
* @max_slaves: maximum number of slave devices
* @driver_id: unique id
* @probe: function called on probe
@@ -46,8 +44,6 @@ struct ccwgroup_device {
* @driver: embedded driver structure
*/
struct ccwgroup_driver {
- struct module *owner;
- char *name;
int max_slaves;
unsigned long driver_id;
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index e34347d567a6..fc50a3342da3 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -183,7 +183,7 @@ struct esw3 {
* The irb that is handed to the device driver when an interrupt occurs. For
* solicited interrupts, the common I/O layer already performs checks whether
* a field is valid; a field not being valid is always passed as %0.
- * If a unit check occured, @ecw may contain sense data; this is retrieved
+ * If a unit check occurred, @ecw may contain sense data; this is retrieved
* by the common I/O layer itself if the device doesn't support concurrent
* sense (so that the device driver never needs to perform basic sene itself).
* For unsolicited interrupts, the irb is passed as-is (expect for sense data,
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..7488e52efa97
--- /dev/null
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright IBM Corp. 1999, 2011
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ */
+
+#ifndef __ASM_CMPXCHG_H
+#define __ASM_CMPXCHG_H
+
+#include <linux/types.h>
+
+extern void __xchg_called_with_bad_pointer(void);
+
+static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
+{
+ unsigned long addr, old;
+ int shift;
+
+ switch (size) {
+ case 1:
+ addr = (unsigned long) ptr;
+ shift = (3 ^ (addr & 3)) << 3;
+ addr ^= addr & 3;
+ asm volatile(
+ " l %0,%4\n"
+ "0: lr 0,%0\n"
+ " nr 0,%3\n"
+ " or 0,%2\n"
+ " cs %0,0,%4\n"
+ " jl 0b\n"
+ : "=&d" (old), "=Q" (*(int *) addr)
+ : "d" (x << shift), "d" (~(255 << shift)),
+ "Q" (*(int *) addr) : "memory", "cc", "0");
+ return old >> shift;
+ case 2:
+ addr = (unsigned long) ptr;
+ shift = (2 ^ (addr & 2)) << 3;
+ addr ^= addr & 2;
+ asm volatile(
+ " l %0,%4\n"
+ "0: lr 0,%0\n"
+ " nr 0,%3\n"
+ " or 0,%2\n"
+ " cs %0,0,%4\n"
+ " jl 0b\n"
+ : "=&d" (old), "=Q" (*(int *) addr)
+ : "d" (x << shift), "d" (~(65535 << shift)),
+ "Q" (*(int *) addr) : "memory", "cc", "0");
+ return old >> shift;
+ case 4:
+ asm volatile(
+ " l %0,%3\n"
+ "0: cs %0,%2,%3\n"
+ " jl 0b\n"
+ : "=&d" (old), "=Q" (*(int *) ptr)
+ : "d" (x), "Q" (*(int *) ptr)
+ : "memory", "cc");
+ return old;
+#ifdef CONFIG_64BIT
+ case 8:
+ asm volatile(
+ " lg %0,%3\n"
+ "0: csg %0,%2,%3\n"
+ " jl 0b\n"
+ : "=&d" (old), "=m" (*(long *) ptr)
+ : "d" (x), "Q" (*(long *) ptr)
+ : "memory", "cc");
+ return old;
+#endif /* CONFIG_64BIT */
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
+}
+
+#define xchg(ptr, x) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __ret = (__typeof__(*(ptr))) \
+ __xchg((unsigned long)(x), (void *)(ptr), sizeof(*(ptr)));\
+ __ret; \
+})
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG
+
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long addr, prev, tmp;
+ int shift;
+
+ switch (size) {
+ case 1:
+ addr = (unsigned long) ptr;
+ shift = (3 ^ (addr & 3)) << 3;
+ addr ^= addr & 3;
+ asm volatile(
+ " l %0,%2\n"
+ "0: nr %0,%5\n"
+ " lr %1,%0\n"
+ " or %0,%3\n"
+ " or %1,%4\n"
+ " cs %0,%1,%2\n"
+ " jnl 1f\n"
+ " xr %1,%0\n"
+ " nr %1,%5\n"
+ " jnz 0b\n"
+ "1:"
+ : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
+ : "d" (old << shift), "d" (new << shift),
+ "d" (~(255 << shift)), "Q" (*(int *) ptr)
+ : "memory", "cc");
+ return prev >> shift;
+ case 2:
+ addr = (unsigned long) ptr;
+ shift = (2 ^ (addr & 2)) << 3;
+ addr ^= addr & 2;
+ asm volatile(
+ " l %0,%2\n"
+ "0: nr %0,%5\n"
+ " lr %1,%0\n"
+ " or %0,%3\n"
+ " or %1,%4\n"
+ " cs %0,%1,%2\n"
+ " jnl 1f\n"
+ " xr %1,%0\n"
+ " nr %1,%5\n"
+ " jnz 0b\n"
+ "1:"
+ : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
+ : "d" (old << shift), "d" (new << shift),
+ "d" (~(65535 << shift)), "Q" (*(int *) ptr)
+ : "memory", "cc");
+ return prev >> shift;
+ case 4:
+ asm volatile(
+ " cs %0,%3,%1\n"
+ : "=&d" (prev), "=Q" (*(int *) ptr)
+ : "0" (old), "d" (new), "Q" (*(int *) ptr)
+ : "memory", "cc");
+ return prev;
+#ifdef CONFIG_64BIT
+ case 8:
+ asm volatile(
+ " csg %0,%3,%1\n"
+ : "=&d" (prev), "=Q" (*(long *) ptr)
+ : "0" (old), "d" (new), "Q" (*(long *) ptr)
+ : "memory", "cc");
+ return prev;
+#endif /* CONFIG_64BIT */
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+#define cmpxchg(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+
+#ifdef CONFIG_64BIT
+#define cmpxchg64(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg((ptr), (o), (n)); \
+})
+#else /* CONFIG_64BIT */
+static inline unsigned long long __cmpxchg64(void *ptr,
+ unsigned long long old,
+ unsigned long long new)
+{
+ register_pair rp_old = {.pair = old};
+ register_pair rp_new = {.pair = new};
+
+ asm volatile(
+ " cds %0,%2,%1"
+ : "+&d" (rp_old), "=Q" (ptr)
+ : "d" (rp_new), "Q" (ptr)
+ : "cc");
+ return rp_old.pair;
+}
+#define cmpxchg64(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg64((ptr), \
+ (unsigned long long)(o), \
+ (unsigned long long)(n)))
+#endif /* CONFIG_64BIT */
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 1:
+ case 2:
+ case 4:
+#ifdef CONFIG_64BIT
+ case 8:
+#endif
+ return __cmpxchg(ptr, old, new, size);
+ default:
+ return __cmpxchg_local_generic(ptr, old, new, size);
+ }
+
+ return old;
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+
+#define cmpxchg64_local(ptr, o, n) cmpxchg64((ptr), (o), (n))
+
+#endif /* __ASM_CMPXCHG_H */
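
Purely as an illustration of the interface the new header exports (not part of the patch), a typical compare-and-swap retry loop; it assumes <asm/cmpxchg.h> is included and "counter" is a hypothetical caller-owned variable:

/* Add "delta" to *counter without locking: retry until the CS succeeds. */
static inline void add_with_cmpxchg(unsigned long *counter, unsigned long delta)
{
	unsigned long old;

	do {
		old = *counter;			/* snapshot the current value */
		/* cmpxchg() returns the previous value; loop if it moved */
	} while (cmpxchg(counter, old, old + delta) != old);
}
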
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 8f8d759f6a7b..d382629a0172 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -14,6 +14,7 @@
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/lowcore.h>
+#include <asm/cmpxchg.h>
#ifdef __KERNEL__
@@ -120,161 +121,6 @@ extern int memcpy_real(void *, void *, size_t);
#define nop() asm volatile("nop")
-#define xchg(ptr,x) \
-({ \
- __typeof__(*(ptr)) __ret; \
- __ret = (__typeof__(*(ptr))) \
- __xchg((unsigned long)(x), (void *)(ptr),sizeof(*(ptr))); \
- __ret; \
-})
-
-extern void __xchg_called_with_bad_pointer(void);
-
-static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
-{
- unsigned long addr, old;
- int shift;
-
- switch (size) {
- case 1:
- addr = (unsigned long) ptr;
- shift = (3 ^ (addr & 3)) << 3;
- addr ^= addr & 3;
- asm volatile(
- " l %0,%4\n"
- "0: lr 0,%0\n"
- " nr 0,%3\n"
- " or 0,%2\n"
- " cs %0,0,%4\n"
- " jl 0b\n"
- : "=&d" (old), "=Q" (*(int *) addr)
- : "d" (x << shift), "d" (~(255 << shift)),
- "Q" (*(int *) addr) : "memory", "cc", "0");
- return old >> shift;
- case 2:
- addr = (unsigned long) ptr;
- shift = (2 ^ (addr & 2)) << 3;
- addr ^= addr & 2;
- asm volatile(
- " l %0,%4\n"
- "0: lr 0,%0\n"
- " nr 0,%3\n"
- " or 0,%2\n"
- " cs %0,0,%4\n"
- " jl 0b\n"
- : "=&d" (old), "=Q" (*(int *) addr)
- : "d" (x << shift), "d" (~(65535 << shift)),
- "Q" (*(int *) addr) : "memory", "cc", "0");
- return old >> shift;
- case 4:
- asm volatile(
- " l %0,%3\n"
- "0: cs %0,%2,%3\n"
- " jl 0b\n"
- : "=&d" (old), "=Q" (*(int *) ptr)
- : "d" (x), "Q" (*(int *) ptr)
- : "memory", "cc");
- return old;
-#ifdef __s390x__
- case 8:
- asm volatile(
- " lg %0,%3\n"
- "0: csg %0,%2,%3\n"
- " jl 0b\n"
- : "=&d" (old), "=m" (*(long *) ptr)
- : "d" (x), "Q" (*(long *) ptr)
- : "memory", "cc");
- return old;
-#endif /* __s390x__ */
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-#define cmpxchg(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
-
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static inline unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
-{
- unsigned long addr, prev, tmp;
- int shift;
-
- switch (size) {
- case 1:
- addr = (unsigned long) ptr;
- shift = (3 ^ (addr & 3)) << 3;
- addr ^= addr & 3;
- asm volatile(
- " l %0,%2\n"
- "0: nr %0,%5\n"
- " lr %1,%0\n"
- " or %0,%3\n"
- " or %1,%4\n"
- " cs %0,%1,%2\n"
- " jnl 1f\n"
- " xr %1,%0\n"
- " nr %1,%5\n"
- " jnz 0b\n"
- "1:"
- : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
- : "d" (old << shift), "d" (new << shift),
- "d" (~(255 << shift)), "Q" (*(int *) ptr)
- : "memory", "cc");
- return prev >> shift;
- case 2:
- addr = (unsigned long) ptr;
- shift = (2 ^ (addr & 2)) << 3;
- addr ^= addr & 2;
- asm volatile(
- " l %0,%2\n"
- "0: nr %0,%5\n"
- " lr %1,%0\n"
- " or %0,%3\n"
- " or %1,%4\n"
- " cs %0,%1,%2\n"
- " jnl 1f\n"
- " xr %1,%0\n"
- " nr %1,%5\n"
- " jnz 0b\n"
- "1:"
- : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
- : "d" (old << shift), "d" (new << shift),
- "d" (~(65535 << shift)), "Q" (*(int *) ptr)
- : "memory", "cc");
- return prev >> shift;
- case 4:
- asm volatile(
- " cs %0,%3,%1\n"
- : "=&d" (prev), "=Q" (*(int *) ptr)
- : "0" (old), "d" (new), "Q" (*(int *) ptr)
- : "memory", "cc");
- return prev;
-#ifdef __s390x__
- case 8:
- asm volatile(
- " csg %0,%3,%1\n"
- : "=&d" (prev), "=Q" (*(long *) ptr)
- : "0" (old), "d" (new), "Q" (*(long *) ptr)
- : "memory", "cc");
- return prev;
-#endif /* __s390x__ */
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
-}
-
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
@@ -353,46 +199,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
__ctl_load(__dummy, cr, cr); \
})
-#include <linux/irqflags.h>
-
-#include <asm-generic/cmpxchg-local.h>
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
- unsigned long old,
- unsigned long new, int size)
-{
- switch (size) {
- case 1:
- case 2:
- case 4:
-#ifdef __s390x__
- case 8:
-#endif
- return __cmpxchg(ptr, old, new, size);
- default:
- return __cmpxchg_local_generic(ptr, old, new, size);
- }
-
- return old;
-}
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
-#ifdef __s390x__
-#define cmpxchg64_local(ptr, o, n) \
- ({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
- })
-#else
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#endif
-
/*
* Use to set psw mask except for the first byte which
* won't be changed by this function.
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index 04d6b95a89c6..eeb52ccf499f 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -30,14 +30,6 @@ typedef __signed__ long saddr_t;
#ifndef __ASSEMBLY__
-typedef u64 dma64_addr_t;
-#ifdef __s390x__
-/* DMA addresses come in 32-bit and 64-bit flavours. */
-typedef u64 dma_addr_t;
-#else
-typedef u32 dma_addr_t;
-#endif
-
#ifndef __s390x__
typedef union {
unsigned long long pair;
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 1049ef27c15e..e82152572377 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -272,7 +272,11 @@
#define __NR_fanotify_init 332
#define __NR_fanotify_mark 333
#define __NR_prlimit64 334
-#define NR_syscalls 335
+#define __NR_name_to_handle_at 335
+#define __NR_open_by_handle_at 336
+#define __NR_clock_adjtime 337
+#define __NR_syncfs 338
+#define NR_syscalls 339
/*
* There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 8e60fb23b90d..1dc96ea08fa8 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1877,3 +1877,30 @@ sys_prlimit64_wrapper:
llgtr %r4,%r4 # const struct rlimit64 __user *
llgtr %r5,%r5 # struct rlimit64 __user *
jg sys_prlimit64 # branch to system call
+
+ .globl sys_name_to_handle_at_wrapper
+sys_name_to_handle_at_wrapper:
+ lgfr %r2,%r2 # int
+ llgtr %r3,%r3 # const char __user *
+ llgtr %r4,%r4 # struct file_handle __user *
+ llgtr %r5,%r5 # int __user *
+ lgfr %r6,%r6 # int
+ jg sys_name_to_handle_at
+
+ .globl compat_sys_open_by_handle_at_wrapper
+compat_sys_open_by_handle_at_wrapper:
+ lgfr %r2,%r2 # int
+ llgtr %r3,%r3 # struct file_handle __user *
+ lgfr %r4,%r4 # int
+ jg compat_sys_open_by_handle_at
+
+ .globl compat_sys_clock_adjtime_wrapper
+compat_sys_clock_adjtime_wrapper:
+ lgfr %r2,%r2 # clockid_t (int)
+ llgtr %r3,%r3 # struct compat_timex __user *
+ jg compat_sys_clock_adjtime
+
+ .globl sys_syncfs_wrapper
+sys_syncfs_wrapper:
+ lgfr %r2,%r2 # int
+ jg sys_syncfs
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 3b7e7dddc324..068f8465c4ee 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -94,6 +94,7 @@ static noinline __init void create_kernel_nss(void)
unsigned int sinitrd_pfn, einitrd_pfn;
#endif
int response;
+ int hlen;
size_t len;
char *savesys_ptr;
char defsys_cmd[DEFSYS_CMD_SIZE];
@@ -124,24 +125,27 @@ static noinline __init void create_kernel_nss(void)
end_pfn = PFN_UP(__pa(&_end));
min_size = end_pfn << 2;
- sprintf(defsys_cmd, "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
- kernel_nss_name, stext_pfn - 1, stext_pfn, eshared_pfn - 1,
- eshared_pfn, end_pfn);
+ hlen = snprintf(defsys_cmd, DEFSYS_CMD_SIZE,
+ "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
+ kernel_nss_name, stext_pfn - 1, stext_pfn,
+ eshared_pfn - 1, eshared_pfn, end_pfn);
#ifdef CONFIG_BLK_DEV_INITRD
if (INITRD_START && INITRD_SIZE) {
sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
min_size = einitrd_pfn << 2;
- sprintf(defsys_cmd, "%s EW %.5X-%.5X", defsys_cmd,
- sinitrd_pfn, einitrd_pfn);
+ hlen += snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
+ " EW %.5X-%.5X", sinitrd_pfn, einitrd_pfn);
}
#endif
- sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK PARMREGS=0-13",
- defsys_cmd, min_size);
- sprintf(savesys_cmd, "SAVESYS %s \n IPL %s",
- kernel_nss_name, kernel_nss_name);
+ snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
+ " EW MINSIZE=%.7iK PARMREGS=0-13", min_size);
+ defsys_cmd[DEFSYS_CMD_SIZE - 1] = '\0';
+ snprintf(savesys_cmd, SAVESYS_CMD_SIZE, "SAVESYS %s \n IPL %s",
+ kernel_nss_name, kernel_nss_name);
+ savesys_cmd[SAVESYS_CMD_SIZE - 1] = '\0';
__cpcmd(defsys_cmd, NULL, 0, &response);
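
The early.c hunk above drops the sprintf calls that passed defsys_cmd to itself through "%s" and instead accumulates the command with length-tracked snprintf appends. A stand-alone sketch of that pattern (plain C with illustrative values, not kernel code):

#include <stdio.h>

#define CMD_SIZE 96	/* assumed buffer size for this demo */

int main(void)
{
	char cmd[CMD_SIZE];
	int hlen;

	/* Each append starts at cmd + hlen and is clamped to the space left. */
	hlen = snprintf(cmd, CMD_SIZE, "DEFSYS %s 00000-%.5X", "LNXNSS", 0x123);
	hlen += snprintf(cmd + hlen, CMD_SIZE - hlen, " EW %.5X-%.5X", 0x124, 0x456);
	snprintf(cmd + hlen, CMD_SIZE - hlen, " EW MINSIZE=%.7iK PARMREGS=0-13", 4096);
	cmd[CMD_SIZE - 1] = '\0';	/* forced termination, as in the patch */
	printf("%s\n", cmd);
	return 0;
}
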
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 7061398341d5..fb317bf2c378 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -460,7 +460,7 @@ startup:
#ifndef CONFIG_MARCH_G5
# check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
- stfl __LC_STFL_FAC_LIST # store facility list
+ .insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list
tm __LC_STFL_FAC_LIST,0x01 # stfle available ?
jz 0f
la %r0,0
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index 5e73dee63baa..9eabbc90795d 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -78,7 +78,7 @@ do_reipl_asm: basr %r13,0
* in the ESA psw.
* Bit 31 of the addresses has to be 0 for the
* 31bit lpswe instruction a fact they appear to have
- * ommited from the pop.
+ * omitted from the pop.
*/
.Lnewpsw: .quad 0x0000000080000000
.quad .Lpg1
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 6f6350826c81..f5434d1ecb31 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -102,16 +102,6 @@ EXPORT_SYMBOL(lowcore_ptr);
#include <asm/setup.h>
-static struct resource code_resource = {
- .name = "Kernel code",
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
-};
-
-static struct resource data_resource = {
- .name = "Kernel data",
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
-};
-
/*
* condev= and conmode= setup parameter.
*/
@@ -436,21 +426,43 @@ setup_lowcore(void)
lowcore_ptr[0] = lc;
}
-static void __init
-setup_resources(void)
+static struct resource code_resource = {
+ .name = "Kernel code",
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+};
+
+static struct resource data_resource = {
+ .name = "Kernel data",
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+};
+
+static struct resource bss_resource = {
+ .name = "Kernel bss",
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+};
+
+static struct resource __initdata *standard_resources[] = {
+ &code_resource,
+ &data_resource,
+ &bss_resource,
+};
+
+static void __init setup_resources(void)
{
- struct resource *res, *sub_res;
- int i;
+ struct resource *res, *std_res, *sub_res;
+ int i, j;
code_resource.start = (unsigned long) &_text;
code_resource.end = (unsigned long) &_etext - 1;
data_resource.start = (unsigned long) &_etext;
data_resource.end = (unsigned long) &_edata - 1;
+ bss_resource.start = (unsigned long) &__bss_start;
+ bss_resource.end = (unsigned long) &__bss_stop - 1;
for (i = 0; i < MEMORY_CHUNKS; i++) {
if (!memory_chunk[i].size)
continue;
- res = alloc_bootmem_low(sizeof(struct resource));
+ res = alloc_bootmem_low(sizeof(*res));
res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
switch (memory_chunk[i].type) {
case CHUNK_READ_WRITE:
@@ -464,40 +476,24 @@ setup_resources(void)
res->name = "reserved";
}
res->start = memory_chunk[i].addr;
- res->end = memory_chunk[i].addr + memory_chunk[i].size - 1;
+ res->end = res->start + memory_chunk[i].size - 1;
request_resource(&iomem_resource, res);
- if (code_resource.start >= res->start &&
- code_resource.start <= res->end &&
- code_resource.end > res->end) {
- sub_res = alloc_bootmem_low(sizeof(struct resource));
- memcpy(sub_res, &code_resource,
- sizeof(struct resource));
- sub_res->end = res->end;
- code_resource.start = res->end + 1;
- request_resource(res, sub_res);
- }
-
- if (code_resource.start >= res->start &&
- code_resource.start <= res->end &&
- code_resource.end <= res->end)
- request_resource(res, &code_resource);
-
- if (data_resource.start >= res->start &&
- data_resource.start <= res->end &&
- data_resource.end > res->end) {
- sub_res = alloc_bootmem_low(sizeof(struct resource));
- memcpy(sub_res, &data_resource,
- sizeof(struct resource));
- sub_res->end = res->end;
- data_resource.start = res->end + 1;
- request_resource(res, sub_res);
+ for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
+ std_res = standard_resources[j];
+ if (std_res->start < res->start ||
+ std_res->start > res->end)
+ continue;
+ if (std_res->end > res->end) {
+ sub_res = alloc_bootmem_low(sizeof(*sub_res));
+ *sub_res = *std_res;
+ sub_res->end = res->end;
+ std_res->start = res->end + 1;
+ request_resource(res, sub_res);
+ } else {
+ request_resource(res, std_res);
+ }
}
-
- if (data_resource.start >= res->start &&
- data_resource.start <= res->end &&
- data_resource.end <= res->end)
- request_resource(res, &data_resource);
}
}
@@ -712,7 +708,7 @@ static void __init setup_hwcaps(void)
* and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
* as stored by stfl, bits 32-xxx contain additional facilities.
* How many facility words are stored depends on the number of
- * doublewords passed to the instruction. The additional facilites
+ * doublewords passed to the instruction. The additional facilities
* are:
* Bit 42: decimal floating point facility is installed
* Bit 44: perform floating point operation facility is installed
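
For the reworked setup_resources() above, a small user-space sketch (not kernel code, hypothetical addresses) of the splitting rule applied to each standard resource: the part that fits into the current memory chunk is registered now, and the remainder is carried over into the next chunk:

#include <stdio.h>

struct range { unsigned long start, end; };

static void place_in_chunk(struct range *std, const struct range *chunk)
{
	if (std->start < chunk->start || std->start > chunk->end)
		return;				/* does not begin in this chunk */
	if (std->end > chunk->end) {
		printf("register %#lx-%#lx (split)\n", std->start, chunk->end);
		std->start = chunk->end + 1;	/* rest goes into the next chunk */
	} else {
		printf("register %#lx-%#lx\n", std->start, std->end);
	}
}

int main(void)
{
	struct range chunks[] = { { 0x0, 0xfffff }, { 0x100000, 0x1fffff } };
	struct range code = { 0xe0000, 0x14ffff };	/* hypothetical kernel text */
	unsigned long i;

	for (i = 0; i < sizeof(chunks) / sizeof(chunks[0]); i++)
		place_in_chunk(&code, &chunks[i]);
	return 0;
}
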
diff --git a/arch/s390/kernel/switch_cpu.S b/arch/s390/kernel/switch_cpu.S
index 469f11b574fa..20530dd2eab1 100644
--- a/arch/s390/kernel/switch_cpu.S
+++ b/arch/s390/kernel/switch_cpu.S
@@ -46,7 +46,9 @@ smp_restart_cpu:
ltr %r4,%r4 /* New stack ? */
jz 1f
lr %r15,%r4
-1: basr %r14,%r2
+1: lr %r14,%r2 /* r14: Function to call */
+ lr %r2,%r3 /* r2 : Parameter for function*/
+ basr %r14,%r14 /* Call function */
.gprregs_addr:
.long .gprregs
diff --git a/arch/s390/kernel/switch_cpu64.S b/arch/s390/kernel/switch_cpu64.S
index d94aacc898cb..5be3f43898f9 100644
--- a/arch/s390/kernel/switch_cpu64.S
+++ b/arch/s390/kernel/switch_cpu64.S
@@ -42,7 +42,9 @@ smp_restart_cpu:
ltgr %r4,%r4 /* New stack ? */
jz 1f
lgr %r15,%r4
-1: basr %r14,%r2
+1: lgr %r14,%r2 /* r14: Function to call */
+ lgr %r2,%r3 /* r2 : Parameter for function*/
+ basr %r14,%r14 /* Call function */
.section .data,"aw",@progbits
.gprregs:
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index a8fee1b14395..9c65fd4ddce0 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -343,3 +343,7 @@ SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)
SYSCALL(sys_fanotify_init,sys_fanotify_init,sys_fanotify_init_wrapper)
SYSCALL(sys_fanotify_mark,sys_fanotify_mark,sys_fanotify_mark_wrapper)
SYSCALL(sys_prlimit64,sys_prlimit64,sys_prlimit64_wrapper)
+SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,sys_name_to_handle_at_wrapper) /* 335 */
+SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at_wrapper)
+SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper)
+SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 9e7b039458da..87be655557aa 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -724,7 +724,7 @@ static void clock_sync_cpu(struct clock_sync_data *sync)
}
/*
- * Sync the TOD clock using the port refered to by aibp. This port
+ * Sync the TOD clock using the port referred to by aibp. This port
* has to be enabled and the other port has to be disabled. The
* last eacr update has to be more than 1.6 seconds in the past.
*/
@@ -1012,7 +1012,7 @@ static void etr_work_fn(struct work_struct *work)
eacr = etr_handle_update(&aib, eacr);
/*
- * Select ports to enable. The prefered synchronization mode is PPS.
+ * Select ports to enable. The preferred synchronization mode is PPS.
* If a port can be enabled depends on a number of things:
* 1) The port needs to be online and uptodate. A port is not
* disabled just because it is not uptodate, but it is only
@@ -1091,7 +1091,7 @@ static void etr_work_fn(struct work_struct *work)
/*
* Update eacr and try to synchronize the clock. If the update
* of eacr caused a stepping port switch (or if we have to
- * assume that a stepping port switch has occured) or the
+ * assume that a stepping port switch has occurred) or the
* clock syncing failed, reset the sync check control bit
* and set up a timer to try again after 0.5 seconds
*/
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index f438d74dedbd..d73630b4fe1d 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -337,17 +337,17 @@ static int __init vdso_init(void)
}
arch_initcall(vdso_init);
-int in_gate_area_no_task(unsigned long addr)
+int in_gate_area_no_mm(unsigned long addr)
{
return 0;
}
-int in_gate_area(struct task_struct *task, unsigned long addr)
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
return 0;
}
-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
return NULL;
}
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 1ccdf4d8aa85..5e8ead4b4aba 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -44,7 +44,7 @@ static inline void set_vtimer(__u64 expires)
__u64 timer;
asm volatile (" STPT %0\n" /* Store current cpu timer value */
- " SPT %1" /* Set new value immediatly afterwards */
+ " SPT %1" /* Set new value immediately afterwards */
: "=m" (timer) : "m" (expires) );
S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
S390_lowcore.last_update_timer = expires;
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index e5221ec0b8e3..860d26514c08 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -8,7 +8,7 @@
common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)
-EXTRA_CFLAGS += -Ivirt/kvm -Iarch/s390/kvm
+ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
kvm-objs := $(common-objs) kvm-s390.o sie64a.o intercept.o interrupt.o priv.o sigp.o diag.o
obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index bade533ba288..30ca85cce314 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -721,7 +721,7 @@ static int __init kvm_s390_init(void)
/*
* guests can ask for up to 255+1 double words, we need a full page
- * to hold the maximum amount of facilites. On the other hand, we
+ * to hold the maximum amount of facilities. On the other hand, we
* only set facilities that are known to work in KVM.
*/
facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 9194a4b52b22..73c47bd95db3 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -311,7 +311,7 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
/*
* a lot of B2 instructions are priviledged. We first check for
- * the priviledges ones, that we can handle in the kernel. If the
+ * the privileged ones, that we can handle in the kernel. If the
* kernel can handle this instruction, we check for the problem
* state bit and (a) handle the instruction or (b) send a code 2
* program check.
diff --git a/arch/s390/math-emu/Makefile b/arch/s390/math-emu/Makefile
index c84890341052..51d399549f60 100644
--- a/arch/s390/math-emu/Makefile
+++ b/arch/s390/math-emu/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_MATHEMU) := math.o
-EXTRA_CFLAGS := -I$(src) -Iinclude/math-emu -w
+ccflags-y := -I$(src) -Iinclude/math-emu -w
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 2c57806c0858..9217e332b118 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -392,7 +392,7 @@ void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code,
{
int fault;
- /* Protection exception is supressing, decrement psw address. */
+ /* Protection exception is suppressing, decrement psw address. */
regs->psw.addr -= (pgm_int_code >> 16);
/*
* Check for low-address protection. This needs to be treated
diff --git a/arch/s390/oprofile/Makefile b/arch/s390/oprofile/Makefile
index d698cddcfbdd..524c4b615821 100644
--- a/arch/s390/oprofile/Makefile
+++ b/arch/s390/oprofile/Makefile
@@ -6,4 +6,5 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprofilefs.o oprofile_stats.o \
timer_int.o )
-oprofile-y := $(DRIVER_OBJS) init.o backtrace.o hwsampler.o
+oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
+oprofile-$(CONFIG_64BIT) += hwsampler.o
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index 3d48f4db246d..4952872d6f0a 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -517,12 +517,8 @@ stop_exit:
static int check_hardware_prerequisites(void)
{
- unsigned long long facility_bits[2];
-
- memcpy(facility_bits, S390_lowcore.stfle_fac_list, 32);
- if (!(facility_bits[1] & (1ULL << 59)))
+ if (!test_facility(68))
return -EOPNOTSUPP;
-
return 0;
}
/*
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 16c76def4a9d..c63d7e58352b 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -18,6 +18,11 @@
#include <linux/fs.h>
#include "../../../drivers/oprofile/oprof.h"
+
+extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
+
+#ifdef CONFIG_64BIT
+
#include "hwsampler.h"
#define DEFAULT_INTERVAL 4096
@@ -37,8 +42,6 @@ static int hwsampler_running; /* start_mutex must be held to change */
static struct oprofile_operations timer_ops;
-extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
-
static int oprofile_hwsampler_start(void)
{
int retval;
@@ -172,14 +175,22 @@ static void oprofile_hwsampler_exit(void)
hwsampler_shutdown();
}
+#endif /* CONFIG_64BIT */
+
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
ops->backtrace = s390_backtrace;
+#ifdef CONFIG_64BIT
return oprofile_hwsampler_init(ops);
+#else
+ return -ENODEV;
+#endif
}
void oprofile_arch_exit(void)
{
+#ifdef CONFIG_64BIT
oprofile_hwsampler_exit();
+#endif
}