Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/tls.h                |  62
-rw-r--r--  arch/arm/include/asm/uaccess.h            |  48
-rw-r--r--  arch/arm/include/asm/xen/page-coherent.h  |  25
-rw-r--r--  arch/arm/include/asm/xen/page.h           |   9
4 files changed, 108 insertions(+), 36 deletions(-)
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index 83259b873333..36172adda9d0 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -1,6 +1,9 @@
 #ifndef __ASMARM_TLS_H
 #define __ASMARM_TLS_H
 
+#include <linux/compiler.h>
+#include <asm/thread_info.h>
+
 #ifdef __ASSEMBLY__
 #include <asm/asm-offsets.h>
 	.macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
@@ -50,6 +53,47 @@
 #endif
 
 #ifndef __ASSEMBLY__
+
+static inline void set_tls(unsigned long val)
+{
+	struct thread_info *thread;
+
+	thread = current_thread_info();
+
+	thread->tp_value[0] = val;
+
+	/*
+	 * This code runs with preemption enabled and therefore must
+	 * be reentrant with respect to switch_tls.
+	 *
+	 * We need to ensure ordering between the shadow state and the
+	 * hardware state, so that we don't corrupt the hardware state
+	 * with a stale shadow state during context switch.
+	 *
+	 * If we're preempted here, switch_tls will load TPIDRURO from
+	 * thread_info upon resuming execution and the following mcr
+	 * is merely redundant.
+	 */
+	barrier();
+
+	if (!tls_emu) {
+		if (has_tls_reg) {
+			asm("mcr p15, 0, %0, c13, c0, 3"
+			    : : "r" (val));
+		} else {
+			/*
+			 * User space must never try to access this
+			 * directly. Expect your app to break
+			 * eventually if you do so. The user helper
+			 * at 0xffff0fe0 must be used instead. (see
+			 * entry-armv.S for details)
+			 */
+			*((unsigned int *)0xffff0ff0) = val;
+		}
+
+	}
+}
+
 static inline unsigned long get_tpuser(void)
 {
 	unsigned long reg = 0;
@@ -59,5 +103,23 @@ static inline unsigned long get_tpuser(void)
 
 	return reg;
 }
+
+static inline void set_tpuser(unsigned long val)
+{
+	/* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
+	 * we need not update thread_info.
+	 */
+	if (has_tls_reg && !tls_emu) {
+		asm("mcr p15, 0, %0, c13, c0, 2"
+		    : : "r" (val));
+	}
+}
+
+static inline void flush_tls(void)
+{
+	set_tls(0);
+	set_tpuser(0);
+}
+
 #endif
 #endif	/* __ASMARM_TLS_H */
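
The helpers above are consumed together: a minimal sketch, assuming a
flush_thread()-style caller on exec (the call site itself lives outside
this header-only diff):

	/* sketch: reset both TLS registers when exec'ing a new image */
	static void example_exec_flush(void)
	{
		/*
		 * set_tls(0) updates the tp_value[] shadow first and then,
		 * unless tls_emu, writes TPIDRURO (or the 0xffff0ff0 word);
		 * set_tpuser(0) clears TPIDRURW where the CPU has it.
		 */
		flush_tls();
	}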
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index a4cd7af475e9..4767eb9caa78 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -107,8 +107,11 @@ static inline void set_fs(mm_segment_t fs)
 extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
-extern int __get_user_lo8(void *);
+extern int __get_user_32t_8(void *);
 extern int __get_user_8(void *);
+extern int __get_user_64t_1(void *);
+extern int __get_user_64t_2(void *);
+extern int __get_user_64t_4(void *);
 
 #define __GUP_CLOBBER_1	"lr", "cc"
 #ifdef CONFIG_CPU_USE_DOMAINS
@@ -117,7 +120,7 @@ extern int __get_user_8(void *);
 #define __GUP_CLOBBER_2 "lr", "cc"
 #endif
 #define __GUP_CLOBBER_4	"lr", "cc"
-#define __GUP_CLOBBER_lo8 "lr", "cc"
+#define __GUP_CLOBBER_32t_8 "lr", "cc"
 #define __GUP_CLOBBER_8	"lr", "cc"
 
 #define __get_user_x(__r2,__p,__e,__l,__s)				\
@@ -131,12 +134,30 @@ extern int __get_user_8(void *);
 
 /* narrowing a double-word get into a single 32bit word register: */
 #ifdef __ARMEB__
-#define __get_user_xb(__r2, __p, __e, __l, __s)			\
-	__get_user_x(__r2, __p, __e, __l, lo8)
+#define __get_user_x_32t(__r2, __p, __e, __l, __s)		\
+	__get_user_x(__r2, __p, __e, __l, 32t_8)
 #else
-#define __get_user_xb __get_user_x
+#define __get_user_x_32t __get_user_x
 #endif
 
+/*
+ * storing result into proper least significant word of 64bit target var,
+ * different only for big endian case where 64 bit __r2 lsw is r3:
+ */
+#ifdef __ARMEB__
+#define __get_user_x_64t(__r2, __p, __e, __l, __s)		\
+	__asm__ __volatile__ (					\
+		__asmeq("%0", "r0") __asmeq("%1", "r2")		\
+		__asmeq("%3", "r1")				\
+		"bl	__get_user_64t_" #__s			\
+		: "=&r" (__e), "=r" (__r2)			\
+		: "0" (__p), "r" (__l)				\
+		: __GUP_CLOBBER_##__s)
+#else
+#define __get_user_x_64t __get_user_x
+#endif
+
+
 #define __get_user_check(x,p)						\
 	({								\
 		unsigned long __limit = current_thread_info()->addr_limit - 1; \
@@ -146,17 +167,26 @@ extern int __get_user_8(void *);
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
-			__get_user_x(__r2, __p, __e, __l, 1);		\
+			if (sizeof((x)) >= 8)				\
+				__get_user_x_64t(__r2, __p, __e, __l, 1); \
+			else						\
+				__get_user_x(__r2, __p, __e, __l, 1);	\
 			break;						\
 		case 2:							\
-			__get_user_x(__r2, __p, __e, __l, 2);		\
+			if (sizeof((x)) >= 8)				\
+				__get_user_x_64t(__r2, __p, __e, __l, 2); \
+			else						\
+				__get_user_x(__r2, __p, __e, __l, 2);	\
 			break;						\
 		case 4:							\
-			__get_user_x(__r2, __p, __e, __l, 4);		\
+			if (sizeof((x)) >= 8)				\
+				__get_user_x_64t(__r2, __p, __e, __l, 4); \
+			else						\
+				__get_user_x(__r2, __p, __e, __l, 4);	\
 			break;						\
 		case 8:							\
 			if (sizeof((x)) < 8)				\
-				__get_user_xb(__r2, __p, __e, __l, 4);	\
+				__get_user_x_32t(__r2, __p, __e, __l, 4); \
 			else						\
 				__get_user_x(__r2, __p, __e, __l, 8);	\
 			break;						\
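
The new __get_user_64t_* paths exist for a 64-bit target variable read
through a narrower user pointer: the result must be zero-extended, and on
big endian the fetched word has to land in r3 (the low half of the r2/r3
pair), not r2. A minimal sketch, with example_read_word() as a hypothetical
caller:

	/* sketch: 64-bit destination, 32-bit userspace object */
	static int example_read_word(u32 __user *uptr, u64 *out)
	{
		u64 val;

		/*
		 * sizeof(*uptr) == 4 but sizeof(val) == 8, so
		 * __get_user_check() dispatches to __get_user_x_64t(..., 4);
		 * on __ARMEB__ that fills the low word and zeroes the high
		 * word instead of leaving half of val undefined.
		 */
		if (get_user(val, uptr))
			return -EFAULT;

		*out = val;
		return 0;
	}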
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 1109017499e5..e8275ea88e88 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -26,25 +26,14 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
 
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
-		struct dma_attrs *attrs)
-{
-	if (__generic_dma_ops(hwdev)->unmap_page)
-		__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-}
+		struct dma_attrs *attrs);
 
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
-		__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-}
+void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir);
 
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (__generic_dma_ops(hwdev)->sync_single_for_device)
-		__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-}
 #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
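
Dropping the inline bodies lets the out-of-line implementation distinguish
local pages from foreign (grant-mapped) ones. A rough sketch of the shape it
plausibly takes, assuming a helper named __xen_dma_page_dev_to_cpu (the real
body lives under arch/arm/xen/, outside this diff):

	void xen_dma_sync_single_for_cpu(struct device *hwdev,
			dma_addr_t handle, size_t size, enum dma_data_direction dir)
	{
		/* local RAM pages can keep deferring to the generic dma_ops */
		if (pfn_valid(PFN_DOWN(handle))) {
			if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
				__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev,
						handle, size, dir);
		} else {
			/*
			 * foreign pages have no usable struct page, so cache
			 * maintenance must be keyed on the bus address
			 * (assumed helper name)
			 */
			__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
		}
	}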
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index ded062f9b358..135c24a5ba26 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -33,7 +33,6 @@ typedef struct xpaddr {
 #define INVALID_P2M_ENTRY	(~0UL)
 
 unsigned long __pfn_to_mfn(unsigned long pfn);
-unsigned long __mfn_to_pfn(unsigned long mfn);
 extern struct rb_root phys_to_mach;
 
 static inline unsigned long pfn_to_mfn(unsigned long pfn)
@@ -51,14 +50,6 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
 
 static inline unsigned long mfn_to_pfn(unsigned long mfn)
 {
-	unsigned long pfn;
-
-	if (phys_to_mach.rb_node != NULL) {
-		pfn = __mfn_to_pfn(mfn);
-		if (pfn != INVALID_P2M_ENTRY)
-			return pfn;
-	}
-
 	return mfn;
 }
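
With the rbtree walk removed, the machine-to-physical direction is the
identity on ARM (guests are auto-translated); only pfn_to_mfn(), backed by
the phys_to_mach tree, still tracks foreign grant mappings. A trivial sketch
of the resulting contract:

	/* sketch: mfn_to_pfn() no longer consults phys_to_mach */
	static void example_m2p_identity(unsigned long mfn)
	{
		/* holds unconditionally after this change */
		BUG_ON(mfn_to_pfn(mfn) != mfn);
	}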