Diffstat (limited to 'arch/s390/include/asm')
-rw-r--r--  arch/s390/include/asm/futex.h        66
-rw-r--r--  arch/s390/include/asm/mmu_context.h  37
-rw-r--r--  arch/s390/include/asm/switch_to.h     1
-rw-r--r--  arch/s390/include/asm/thread_info.h   2
-rw-r--r--  arch/s390/include/asm/uaccess.h       2
5 files changed, 86 insertions, 22 deletions
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index fda46bd38c99..69cf5b5eddc9 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -1,12 +1,25 @@
#ifndef _ASM_S390_FUTEX_H
#define _ASM_S390_FUTEX_H
-#include <linux/futex.h>
#include <linux/uaccess.h>
+#include <linux/futex.h>
+#include <asm/mmu_context.h>
#include <asm/errno.h>
-int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval);
-int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old);
+#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
+ asm volatile( \
+ " sacf 256\n" \
+ "0: l %1,0(%6)\n" \
+ "1:"insn \
+ "2: cs %1,%2,0(%6)\n" \
+ "3: jl 1b\n" \
+ " lhi %0,0\n" \
+ "4: sacf 768\n" \
+ EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
+ : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
+ "=m" (*uaddr) \
+ : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
+ "m" (*uaddr) : "cc");
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
@@ -14,13 +27,37 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
- int oldval, ret;
+ int oldval = 0, newval, ret;
+ update_primary_asce(current);
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
pagefault_disable();
- ret = __futex_atomic_op_inuser(op, uaddr, oparg, &oldval);
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("lr %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("lr %2,%1\nar %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("lr %2,%1\nor %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
pagefault_enable();
if (!ret) {
@@ -37,4 +74,23 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
return ret;
}
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret;
+
+ update_primary_asce(current);
+ asm volatile(
+ " sacf 256\n"
+ "0: cs %1,%4,0(%5)\n"
+ "1: la %0,0\n"
+ "2: sacf 768\n"
+ EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+ : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
+ : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
+ : "cc", "memory");
+ *uval = oldval;
+ return ret;
+}
+
#endif /* _ASM_S390_FUTEX_H */
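
For readers unfamiliar with the s390 compare-and-swap idiom, the following user-space sketch models what the "l / <insn> / cs / jl 1b" sequence in __futex_atomic_op() computes. It is illustrative only: the function and enum names are invented for the example, and the -EFAULT/EX_TABLE fault handling as well as the sacf 256/768 address-space switching are left out.

/*
 * User-space model (not kernel code) of the retry loop implemented by
 * __futex_atomic_op(): load the current futex word, compute the new value
 * with the requested operation, and retry the compare-and-swap until no
 * other CPU changed the word in between.  The per-op arithmetic mirrors
 * the instruction strings passed in the switch statement above.
 */
#include <errno.h>
#include <stdint.h>

enum futex_op_model { OP_SET, OP_ADD, OP_OR, OP_ANDN, OP_XOR };

static int futex_op_model(enum futex_op_model op, uint32_t *uaddr,
                          uint32_t oparg, uint32_t *old)
{
        uint32_t oldval, newval;

        oldval = __atomic_load_n(uaddr, __ATOMIC_RELAXED);      /* "l %1,0(%6)" */
        do {
                switch (op) {
                case OP_SET:  newval = oparg;          break;    /* "lr %2,%5"          */
                case OP_ADD:  newval = oldval + oparg; break;    /* "lr %2,%1; ar %2,%5" */
                case OP_OR:   newval = oldval | oparg; break;    /* "lr %2,%1; or %2,%5" */
                case OP_ANDN: newval = oldval & oparg; break;    /* "lr %2,%1; nr %2,%5" */
                case OP_XOR:  newval = oldval ^ oparg; break;    /* "lr %2,%1; xr %2,%5" */
                default:      return -ENOSYS;
                }
                /* "cs %1,%2,0(%6)": on mismatch oldval is refreshed and we
                 * loop, exactly what the "jl 1b" branch does. */
        } while (!__atomic_compare_exchange_n(uaddr, &oldval, newval, 0,
                                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
        *old = oldval;
        return 0;
}

The retry happens whenever another CPU modified the futex word between the initial load and the compare-and-swap; the fault paths handled by the exception table entries are intentionally not modeled.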
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 71a258839039..71be346d0e3c 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -30,27 +30,33 @@ static inline int init_new_context(struct task_struct *tsk,
#define destroy_context(mm) do { } while (0)
-#ifndef CONFIG_64BIT
-#define LCTL_OPCODE "lctl"
-#else
-#define LCTL_OPCODE "lctlg"
-#endif
-
-static inline void update_user_asce(struct mm_struct *mm)
+static inline void update_user_asce(struct mm_struct *mm, int load_primary)
{
pgd_t *pgd = mm->pgd;
S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
- /* Load primary space page table origin. */
- asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
+ if (load_primary)
+ __ctl_load(S390_lowcore.user_asce, 1, 1);
set_fs(current->thread.mm_segment);
}
-static inline void clear_user_asce(struct mm_struct *mm)
+static inline void clear_user_asce(struct mm_struct *mm, int load_primary)
{
S390_lowcore.user_asce = S390_lowcore.kernel_asce;
- asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
- asm volatile(LCTL_OPCODE" 7,7,%0\n" : : "m" (S390_lowcore.user_asce));
+
+ if (load_primary)
+ __ctl_load(S390_lowcore.user_asce, 1, 1);
+ __ctl_load(S390_lowcore.user_asce, 7, 7);
+}
+
+static inline void update_primary_asce(struct task_struct *tsk)
+{
+ unsigned long asce;
+
+ __ctl_store(asce, 1, 1);
+ if (asce != S390_lowcore.kernel_asce)
+ __ctl_load(S390_lowcore.kernel_asce, 1, 1);
+ set_tsk_thread_flag(tsk, TIF_ASCE);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
@@ -58,6 +64,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
int cpu = smp_processor_id();
+ update_primary_asce(tsk);
if (prev == next)
return;
if (MACHINE_HAS_TLB_LC)
@@ -66,10 +73,10 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
/* Delay update_user_asce until all TLB flushes are done. */
set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
/* Clear old ASCE by loading the kernel ASCE. */
- clear_user_asce(next);
+ clear_user_asce(next, 0);
} else {
cpumask_set_cpu(cpu, mm_cpumask(next));
- update_user_asce(next);
+ update_user_asce(next, 0);
if (next->context.flush_mm)
/* Flush pending TLBs */
__tlb_flush_mm(next);
@@ -94,7 +101,7 @@ static inline void finish_arch_post_lock_switch(void)
cpu_relax();
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
- update_user_asce(mm);
+ update_user_asce(mm, 0);
if (mm->context.flush_mm)
__tlb_flush_mm(mm);
preempt_enable();
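
The division of labour between the reworked mmu_context.h helpers can be summarized with a small host-side model. Everything below is a sketch with invented names: cr1/cr7 stand in for the primary and secondary ASCE control registers accessed through __ctl_load()/__ctl_store(), and tif_asce stands in for the TIF_ASCE thread flag added in thread_info.h below.

/*
 * Host-side model (illustrative only) of the control-register bookkeeping
 * done by update_primary_asce(), update_user_asce() and clear_user_asce().
 */
#include <stdbool.h>

static unsigned long cr1, cr7;                  /* primary / secondary ASCE          */
static unsigned long kernel_asce, user_asce;    /* S390_lowcore.{kernel,user}_asce   */
static bool tif_asce;                           /* TIF_ASCE on the current task      */

/* Make sure the primary space points at the kernel ASCE and remember that
 * the primary ASCE still needs a fixup for the current task. */
static void model_update_primary_asce(void)
{
        if (cr1 != kernel_asce)         /* __ctl_store(asce, 1, 1) + compare     */
                cr1 = kernel_asce;      /* __ctl_load(kernel_asce, 1, 1)         */
        tif_asce = true;                /* set_tsk_thread_flag(tsk, TIF_ASCE)    */
}

/* Install the next mm's ASCE; cr1 is only touched when load_primary is set. */
static void model_update_user_asce(unsigned long next_asce, int load_primary)
{
        user_asce = next_asce;
        if (load_primary)
                cr1 = user_asce;
}

/* Point both address spaces at the kernel ASCE while TLB flushes are pending. */
static void model_clear_user_asce(int load_primary)
{
        user_asce = kernel_asce;
        if (load_primary)
                cr1 = user_asce;
        cr7 = user_asce;
}

With load_primary=0, as used from switch_mm() and finish_arch_post_lock_switch(), the primary ASCE is left pointing at the kernel ASCE; the TIF_ASCE bit, whose header comment reads "primary asce needs fixup / uaccess", marks the task whose primary ASCE still has to be switched back before user code runs again.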
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 29c81f82705e..e759181357fc 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -132,6 +132,7 @@ static inline void restore_access_regs(unsigned int *acrs)
update_cr_regs(next); \
} \
prev = __switch_to(prev,next); \
+ update_primary_asce(current); \
} while (0)
#define finish_arch_switch(prev) do { \
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 3ccd71b90345..50630e6a35de 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -82,6 +82,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_TLB_WAIT 4 /* wait for TLB flush completion */
+#define TIF_ASCE 5 /* primary asce needs fixup / uaccess */
#define TIF_PER_TRAP 6 /* deliver sigtrap on return to user */
#define TIF_MCCK_PENDING 7 /* machine check handling is pending */
#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
@@ -99,6 +100,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_TLB_WAIT (1<<TIF_TLB_WAIT)
+#define _TIF_ASCE (1<<TIF_ASCE)
#define _TIF_PER_TRAP (1<<TIF_PER_TRAP)
#define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING)
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
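
As a quick aside, TIF_ASCE is a bit number and _TIF_ASCE the corresponding mask, following the existing TIF_*/_TIF_* pattern, so pending-work checks can test several bits with one AND. A tiny stand-alone illustration (the flags word and message are made up, not kernel code):

#include <stdio.h>

#define TIF_ASCE        5
#define _TIF_ASCE       (1 << TIF_ASCE)

int main(void)
{
        unsigned long ti_flags = _TIF_ASCE;     /* as set by update_primary_asce() */

        if (ti_flags & _TIF_ASCE)
                printf("restore user ASCE before returning to user space\n");
        return 0;
}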
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 4133b3f72fb0..1be64a1506d0 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -92,8 +92,6 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE
-int __handle_fault(unsigned long, unsigned long, int);
-
/**
* __copy_from_user: - Copy a block of data from user space, with less checking.
* @to: Destination address, in kernel space.