author		Max Filippov <jcmvbkbc@gmail.com>	2012-11-11 03:30:02 +0400
committer	Chris Zankel <chris@zankel.net>	2012-12-19 09:10:22 +0400
commit		219b1e4c61c108731bb665962231b1fa057f6c71 (patch)
tree		97ccb1c5069dea39b132086faf48607cf3b78cd3 /arch/xtensa
parent		00273125c39be9cbf619aef90147354a9ed8c385 (diff)
download	linux-219b1e4c61c108731bb665962231b1fa057f6c71.tar.xz
xtensa: add s32c1i-based atomic ops implementations
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>
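Note: every s32c1i-based path added below follows the same compare-and-swap retry loop. As an illustrative sketch only (not part of this patch), the loop that the new atomic_add() assembly implements is roughly equivalent to the following C, assuming the kernel's generic cmpxchg() applied to the atomic_t counter field:

	/* Sketch only: C-level equivalent of the s32c1i retry loop. */
	static inline void atomic_add_sketch(int i, atomic_t *v)
	{
		int old, new;

		do {
			old = v->counter;	/* l32i: read the current value    */
			new = old + i;		/* add:  compute the updated value */
			/*
			 * s32c1i stores 'new' only if *v still holds 'old'
			 * (loaded into scompare1) and returns the value it
			 * actually found there; 'bne ... 1b' retries when
			 * another CPU changed the counter in between.
			 */
		} while (cmpxchg(&v->counter, old, new) != old);
	}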
Diffstat (limited to 'arch/xtensa')
-rw-r--r--	arch/xtensa/include/asm/atomic.h	271
-rw-r--r--	arch/xtensa/include/asm/cmpxchg.h	71
2 files changed, 236 insertions(+), 106 deletions(-)
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 24f50cada70c..c3f289174c10 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -66,19 +66,35 @@
*/
static inline void atomic_add(int i, atomic_t * v)
{
- unsigned int vval;
-
- __asm__ __volatile__(
- "rsil a15, "__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %2, 0 \n\t"
- "add %0, %0, %1 \n\t"
- "s32i %0, %2, 0 \n\t"
- "wsr a15, ps \n\t"
- "rsync \n"
- : "=&a" (vval)
- : "a" (i), "a" (v)
- : "a15", "memory"
- );
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " add %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (i), "a" (v)
+ : "memory"
+ );
+#else
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ " rsil a15, "__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %2, 0\n"
+ " add %0, %0, %1\n"
+ " s32i %0, %2, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (vval)
+ : "a" (i), "a" (v)
+ : "a15", "memory"
+ );
+#endif
}
/**
@@ -90,19 +106,35 @@ static inline void atomic_add(int i, atomic_t * v)
*/
static inline void atomic_sub(int i, atomic_t *v)
{
- unsigned int vval;
-
- __asm__ __volatile__(
- "rsil a15, "__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %2, 0 \n\t"
- "sub %0, %0, %1 \n\t"
- "s32i %0, %2, 0 \n\t"
- "wsr a15, ps \n\t"
- "rsync \n"
- : "=&a" (vval)
- : "a" (i), "a" (v)
- : "a15", "memory"
- );
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " sub %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (i), "a" (v)
+ : "memory"
+ );
+#else
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ " rsil a15, "__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %2, 0\n"
+ " sub %0, %0, %1\n"
+ " s32i %0, %2, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (vval)
+ : "a" (i), "a" (v)
+ : "a15", "memory"
+ );
+#endif
}
/*
@@ -111,40 +143,78 @@ static inline void atomic_sub(int i, atomic_t *v)
static inline int atomic_add_return(int i, atomic_t * v)
{
- unsigned int vval;
-
- __asm__ __volatile__(
- "rsil a15,"__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %2, 0 \n\t"
- "add %0, %0, %1 \n\t"
- "s32i %0, %2, 0 \n\t"
- "wsr a15, ps \n\t"
- "rsync \n"
- : "=&a" (vval)
- : "a" (i), "a" (v)
- : "a15", "memory"
- );
-
- return vval;
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " add %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ " add %0, %0, %2\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (i), "a" (v)
+ : "memory"
+ );
+
+ return result;
+#else
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ " rsil a15,"__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %2, 0\n"
+ " add %0, %0, %1\n"
+ " s32i %0, %2, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (vval)
+ : "a" (i), "a" (v)
+ : "a15", "memory"
+ );
+
+ return vval;
+#endif
}
static inline int atomic_sub_return(int i, atomic_t * v)
{
- unsigned int vval;
-
- __asm__ __volatile__(
- "rsil a15,"__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %2, 0 \n\t"
- "sub %0, %0, %1 \n\t"
- "s32i %0, %2, 0 \n\t"
- "wsr a15, ps \n\t"
- "rsync \n"
- : "=&a" (vval)
- : "a" (i), "a" (v)
- : "a15", "memory"
- );
-
- return vval;
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " sub %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ " sub %0, %0, %2\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (i), "a" (v)
+ : "memory"
+ );
+
+ return result;
+#else
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ " rsil a15,"__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %2, 0\n"
+ " sub %0, %0, %1\n"
+ " s32i %0, %2, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (vval)
+ : "a" (i), "a" (v)
+ : "a15", "memory"
+ );
+
+ return vval;
+#endif
}
/**
@@ -251,38 +321,70 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
- unsigned int all_f = -1;
- unsigned int vval;
-
- __asm__ __volatile__(
- "rsil a15,"__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %2, 0 \n\t"
- "xor %1, %4, %3 \n\t"
- "and %0, %0, %4 \n\t"
- "s32i %0, %2, 0 \n\t"
- "wsr a15, ps \n\t"
- "rsync \n"
- : "=&a" (vval), "=a" (mask)
- : "a" (v), "a" (all_f), "1" (mask)
- : "a15", "memory"
- );
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " and %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (~mask), "a" (v)
+ : "memory"
+ );
+#else
+ unsigned int all_f = -1;
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ " rsil a15,"__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %2, 0\n"
+ " xor %1, %4, %3\n"
+ " and %0, %0, %4\n"
+ " s32i %0, %2, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (vval), "=a" (mask)
+ : "a" (v), "a" (all_f), "1" (mask)
+ : "a15", "memory"
+ );
+#endif
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
- unsigned int vval;
-
- __asm__ __volatile__(
- "rsil a15,"__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %2, 0 \n\t"
- "or %0, %0, %1 \n\t"
- "s32i %0, %2, 0 \n\t"
- "wsr a15, ps \n\t"
- "rsync \n"
- : "=&a" (vval)
- : "a" (mask), "a" (v)
- : "a15", "memory"
- );
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " or %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (mask), "a" (v)
+ : "memory"
+ );
+#else
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ " rsil a15,"__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %2, 0\n"
+ " or %0, %0, %1\n"
+ " s32i %0, %2, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (vval)
+ : "a" (mask), "a" (v)
+ : "a15", "memory"
+ );
+#endif
}
/* Atomic operations are already serializing */
@@ -294,4 +396,3 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
#endif /* __KERNEL__ */
#endif /* _XTENSA_ATOMIC_H */
-
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index 64dad04a9d27..25869a190490 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -22,17 +22,30 @@
static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
- __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %1, 0 \n\t"
- "bne %0, %2, 1f \n\t"
- "s32i %3, %1, 0 \n\t"
- "1: \n\t"
- "wsr a15, ps \n\t"
- "rsync \n\t"
- : "=&a" (old)
- : "a" (p), "a" (old), "r" (new)
- : "a15", "memory");
- return old;
+#if XCHAL_HAVE_S32C1I
+ __asm__ __volatile__(
+ " wsr %2, scompare1\n"
+ " s32c1i %0, %1, 0\n"
+ : "+a" (new)
+ : "a" (p), "a" (old)
+ : "memory"
+ );
+
+ return new;
+#else
+ __asm__ __volatile__(
+ " rsil a15, "__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %1, 0\n"
+ " bne %0, %2, 1f\n"
+ " s32i %3, %1, 0\n"
+ "1:\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (old)
+ : "a" (p), "a" (old), "r" (new)
+ : "a15", "memory");
+ return old;
+#endif
}
/* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg(). */
@@ -93,16 +106,32 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
{
- unsigned long tmp;
- __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %1, 0 \n\t"
- "s32i %2, %1, 0 \n\t"
- "wsr a15, ps \n\t"
- "rsync \n\t"
- : "=&a" (tmp)
- : "a" (m), "a" (val)
- : "a15", "memory");
- return tmp;
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp, result;
+ __asm__ __volatile__(
+ "1: l32i %1, %2, 0\n"
+ " mov %0, %3\n"
+ " wsr %1, scompare1\n"
+ " s32c1i %0, %2, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (m), "a" (val)
+ : "memory"
+ );
+ return result;
+#else
+ unsigned long tmp;
+ __asm__ __volatile__(
+ " rsil a15, "__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %1, 0\n"
+ " s32i %2, %1, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (tmp)
+ : "a" (m), "a" (val)
+ : "a15", "memory");
+ return tmp;
+#endif
}
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
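For reference, a minimal usage sketch of the primitives this header exports (illustrative only, not part of the patch): cmpxchg() stores the new value only when the location still holds the expected old value and returns what it found there, while xchg() swaps in the new value unconditionally and returns the previous one.

	/* Sketch only: a hypothetical one-shot flag built on these primitives. */
	static int flag_once;	/* hypothetical flag; illustrative only */

	static inline int claim_once(void)
	{
		/*
		 * cmpxchg() stores 1 only if the flag was still 0 and returns
		 * the value previously found there, so exactly one caller
		 * observes 0 and "wins".
		 */
		return cmpxchg(&flag_once, 0, 1) == 0;
	}

	static inline int reset_flag(void)
	{
		/* xchg() swaps in 0 unconditionally and returns the old value. */
		return xchg(&flag_once, 0);
	}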