author     Linus Torvalds <torvalds@linux-foundation.org>  2020-08-05 08:02:47 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-08-05 08:02:47 +0300
commit     95ffa676583b23baed40861d30b65fe31397da00 (patch)
tree       40ea8089c208af4061bcc8046bf2599a3a3c7309 /arch/parisc/include/asm
parent     4da9f3302615f4191814f826054846bf843e24fa (diff)
parent     e2693ec1e0a12a6dc601e4dfe9b6f714b92f6954 (diff)
Merge branch 'parisc-5.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux
Pull parisc updates from Helge Deller:
 "The majority of the patches are reverts of previous commits regarding
  the parisc-specific low level spinlocking code and barrier handling,
  with which we tried to fix CPU stalls on our build servers. In the end
  John David Anglin found the culprit: We missed a define for
  atomic64_set_release(). This seems to have fixed our issues, so now
  it's good to remove the unnecessary code again.

  Other than that it's trivial stuff: Spelling fixes, constifications
  and such"

* 'parisc-5.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: make the log level string for register dumps const
  parisc: Do not use an ordered store in pa_tlb_lock()
  Revert "parisc: Revert "Release spinlocks using ordered store""
  Revert "parisc: Use ldcw instruction for SMP spinlock release barrier"
  Revert "parisc: Drop LDCW barrier in CAS code when running UP"
  Revert "parisc: Improve interrupt handling in arch_spin_lock_flags()"
  parisc: Replace HTTP links with HTTPS ones
  parisc: elf.h: delete a duplicated word
  parisc: Report bad pages as HardwareCorrupted
  parisc: Convert to BIT_MASK() and BIT_WORD()
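For context only, a minimal sketch of the kind of define the message refers to; the actual fix lives in arch/parisc/include/asm/atomic.h and is not part of this diff. The assumption here is that the release variant can simply map onto the existing atomic64_set(), which on parisc already takes the atomic hash spinlock and therefore provides the required ordering:

	/* sketch: release variant falls back to the spinlock-protected setter */
	#define atomic64_set_release(v, i)	atomic64_set((v), (i))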
Diffstat (limited to 'arch/parisc/include/asm')
-rw-r--r--  arch/parisc/include/asm/bitops.h    | 41
-rw-r--r--  arch/parisc/include/asm/elf.h       |  2
-rw-r--r--  arch/parisc/include/asm/spinlock.h  | 33
3 files changed, 24 insertions(+), 52 deletions(-)
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
index a09eaebfdfd0..aa4e883431c1 100644
--- a/arch/parisc/include/asm/bitops.h
+++ b/arch/parisc/include/asm/bitops.h
@@ -12,21 +12,6 @@
#include <asm/barrier.h>
#include <linux/atomic.h>
-/*
- * HP-PARISC specific bit operations
- * for a detailed description of the functions please refer
- * to include/asm-i386/bitops.h or kerneldoc
- */
-
-#if __BITS_PER_LONG == 64
-#define SHIFT_PER_LONG 6
-#else
-#define SHIFT_PER_LONG 5
-#endif
-
-#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
-
-
/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
* on use of volatile and __*_bit() (set/clear/change):
* *_bit() want use of volatile.
@@ -35,10 +20,10 @@
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
- unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+ unsigned long mask = BIT_MASK(nr);
unsigned long flags;
- addr += (nr >> SHIFT_PER_LONG);
+ addr += BIT_WORD(nr);
_atomic_spin_lock_irqsave(addr, flags);
*addr |= mask;
_atomic_spin_unlock_irqrestore(addr, flags);
@@ -46,21 +31,21 @@ static __inline__ void set_bit(int nr, volatile unsigned long * addr)
static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
- unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
+ unsigned long mask = BIT_MASK(nr);
unsigned long flags;
- addr += (nr >> SHIFT_PER_LONG);
+ addr += BIT_WORD(nr);
_atomic_spin_lock_irqsave(addr, flags);
- *addr &= mask;
+ *addr &= ~mask;
_atomic_spin_unlock_irqrestore(addr, flags);
}
static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
- unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+ unsigned long mask = BIT_MASK(nr);
unsigned long flags;
- addr += (nr >> SHIFT_PER_LONG);
+ addr += BIT_WORD(nr);
_atomic_spin_lock_irqsave(addr, flags);
*addr ^= mask;
_atomic_spin_unlock_irqrestore(addr, flags);
@@ -68,12 +53,12 @@ static __inline__ void change_bit(int nr, volatile unsigned long * addr)
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
- unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+ unsigned long mask = BIT_MASK(nr);
unsigned long old;
unsigned long flags;
int set;
- addr += (nr >> SHIFT_PER_LONG);
+ addr += BIT_WORD(nr);
_atomic_spin_lock_irqsave(addr, flags);
old = *addr;
set = (old & mask) ? 1 : 0;
@@ -86,12 +71,12 @@ static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
- unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+ unsigned long mask = BIT_MASK(nr);
unsigned long old;
unsigned long flags;
int set;
- addr += (nr >> SHIFT_PER_LONG);
+ addr += BIT_WORD(nr);
_atomic_spin_lock_irqsave(addr, flags);
old = *addr;
set = (old & mask) ? 1 : 0;
@@ -104,11 +89,11 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
- unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+ unsigned long mask = BIT_MASK(nr);
unsigned long oldbit;
unsigned long flags;
- addr += (nr >> SHIFT_PER_LONG);
+ addr += BIT_WORD(nr);
_atomic_spin_lock_irqsave(addr, flags);
oldbit = *addr;
*addr = oldbit ^ mask;
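For reference, the generic helpers that the bitops code above switches to are defined in include/linux/bits.h roughly as follows (reproduced here as a sketch, not part of this diff). They perform the same word-index and bit-mask computations that the removed SHIFT_PER_LONG / CHOP_SHIFTCOUNT macros open-coded with shifts:

	#define BIT_MASK(nr)	(UL(1) << ((nr) % BITS_PER_LONG))
	#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)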
diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
index d00973aab7f1..301af07a04d4 100644
--- a/arch/parisc/include/asm/elf.h
+++ b/arch/parisc/include/asm/elf.h
@@ -152,7 +152,7 @@
/* The following are PA function descriptors
*
* addr: the absolute address of the function
- * gp: either the data pointer (r27) for non-PIC code or the
+ * gp: either the data pointer (r27) for non-PIC code or
* the PLT pointer (r19) for PIC code */
/* Format for the Elf32 Function descriptor */
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 70fecb8dc4e2..51b6c47f802f 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -10,34 +10,25 @@
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
volatile unsigned int *a = __ldcw_align(x);
- smp_mb();
return *a == 0;
}
-static inline void arch_spin_lock(arch_spinlock_t *x)
-{
- volatile unsigned int *a;
-
- a = __ldcw_align(x);
- while (__ldcw(a) == 0)
- while (*a == 0)
- cpu_relax();
-}
+#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
unsigned long flags)
{
volatile unsigned int *a;
- unsigned long flags_dis;
a = __ldcw_align(x);
- while (__ldcw(a) == 0) {
- local_save_flags(flags_dis);
- local_irq_restore(flags);
+ while (__ldcw(a) == 0)
while (*a == 0)
- cpu_relax();
- local_irq_restore(flags_dis);
- }
+ if (flags & PSW_SM_I) {
+ local_irq_enable();
+ cpu_relax();
+ local_irq_disable();
+ } else
+ cpu_relax();
}
#define arch_spin_lock_flags arch_spin_lock_flags
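Caller-side sketch (generic kernel API; my_lock and my_func are placeholders, not part of this patch): spin_lock_irqsave() saves the interrupt state into 'flags' and disables interrupts, and that saved state is what can reach arch_spin_lock_flags(), so the PSW_SM_I test above only re-enables interrupts while spinning if they were enabled before the lock was taken:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(my_lock);

	static void my_func(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&my_lock, flags);
		/* critical section runs with interrupts disabled */
		spin_unlock_irqrestore(&my_lock, flags);
	}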
@@ -46,12 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
volatile unsigned int *a;
a = __ldcw_align(x);
-#ifdef CONFIG_SMP
- (void) __ldcw(a);
-#else
- mb();
-#endif
- *a = 1;
+ /* Release with ordered store. */
+ __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
}
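The unlock above is now a single ordered store, as the added comment in the hunk notes, so no ldcw or extra barrier is needed before marking the lock free. Conceptually it behaves like a release store of 1 into the lock word; a rough C-level sketch of the same semantics (illustration only, not the parisc implementation) would be:

	/* illustration: release-store the unlocked value into the lock word */
	static inline void spin_unlock_sketch(arch_spinlock_t *x)
	{
		volatile unsigned int *a = __ldcw_align(x);

		/* every access inside the critical section is ordered
		 * before this store, which publishes the lock as free */
		smp_store_release(a, 1);
	}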
static inline int arch_spin_trylock(arch_spinlock_t *x)