diff options
author | Bin Meng <bmeng.cn@gmail.com> | 2020-03-09 06:52:41 +0300 |
---|---|---|
committer | Anup Patel <anup@brainfault.org> | 2020-03-10 07:57:28 +0300 |
commit | 650c0e525ce60e80b6756aa8cc4eeae5b20ccbd8 (patch) | |
tree | 579e92ff39fa780818fe483337850d28df5ddb15 /lib/sbi/riscv_atomic.c | |
parent | 6e87507db6ce7be39ccccb33d6283814f0283c09 (diff) | |
download | opensbi-650c0e525ce60e80b6756aa8cc4eeae5b20ccbd8.tar.xz |
lib: sbi: Fix coding style issues
This fixes various coding style issues found in the SBI codes.
No functional changes.
Signed-off-by: Bin Meng <bmeng.cn@gmail.com>
Reviewed-by: Anup Patel <anup.patel@wdc.com>
Diffstat (limited to 'lib/sbi/riscv_atomic.c')
-rw-r--r-- | lib/sbi/riscv_atomic.c | 42 |
1 file changed, 21 insertions, 21 deletions
diff --git a/lib/sbi/riscv_atomic.c b/lib/sbi/riscv_atomic.c index 9d8199e..fcf3ed1 100644 --- a/lib/sbi/riscv_atomic.c +++ b/lib/sbi/riscv_atomic.c @@ -49,7 +49,7 @@ long atomic_sub_return(atomic_t *atom, long value) return ret - value; } -#define __axchg(ptr, new, size) \ +#define __axchg(ptr, new, size) \ ({ \ __typeof__(ptr) __ptr = (ptr); \ __typeof__(new) __new = (new); \ @@ -70,12 +70,12 @@ long atomic_sub_return(atomic_t *atom, long value) : "memory"); \ break; \ default: \ - break; \ + break; \ } \ __ret; \ }) -#define axchg(ptr, x) \ +#define axchg(ptr, x) \ ({ \ __typeof__(*(ptr)) _x_ = (x); \ (__typeof__(*(ptr))) __axchg((ptr), _x_, sizeof(*(ptr))); \ @@ -90,20 +90,20 @@ long atomic_sub_return(atomic_t *atom, long value) register unsigned int __rc; \ switch (size) { \ case 4: \ - __asm__ __volatile__("0: lr.w %0, %2\n" \ - " sc.w.rl %1, %z3, %2\n" \ - " bnez %1, 0b\n" \ - " fence rw, rw\n" \ + __asm__ __volatile__("0: lr.w %0, %2\n" \ + " sc.w.rl %1, %z3, %2\n" \ + " bnez %1, 0b\n" \ + " fence rw, rw\n" \ : "=&r"(__ret), "=&r"(__rc), \ "+A"(*__ptr) \ : "rJ"(__new) \ : "memory"); \ break; \ case 8: \ - __asm__ __volatile__("0: lr.d %0, %2\n" \ - " sc.d.rl %1, %z3, %2\n" \ - " bnez %1, 0b\n" \ - " fence rw, rw\n" \ + __asm__ __volatile__("0: lr.d %0, %2\n" \ + " sc.d.rl %1, %z3, %2\n" \ + " bnez %1, 0b\n" \ + " fence rw, rw\n" \ : "=&r"(__ret), "=&r"(__rc), \ "+A"(*__ptr) \ : "rJ"(__new) \ @@ -130,11 +130,11 @@ long atomic_sub_return(atomic_t *atom, long value) register unsigned int __rc; \ switch (size) { \ case 4: \ - __asm__ __volatile__("0: lr.w %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc.w.rl %1, %z4, %2\n" \ - " bnez %1, 0b\n" \ - " fence rw, rw\n" \ + __asm__ __volatile__("0: lr.w %0, %2\n" \ + " bne %0, %z3, 1f\n" \ + " sc.w.rl %1, %z4, %2\n" \ + " bnez %1, 0b\n" \ + " fence rw, rw\n" \ "1:\n" \ : "=&r"(__ret), "=&r"(__rc), \ "+A"(*__ptr) \ @@ -142,11 +142,11 @@ long atomic_sub_return(atomic_t *atom, long value) : "memory"); \ break; \ case 8: \ - 
__asm__ __volatile__("0: lr.d %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc.d.rl %1, %z4, %2\n" \ - " bnez %1, 0b\n" \ - " fence rw, rw\n" \ + __asm__ __volatile__("0: lr.d %0, %2\n" \ + " bne %0, %z3, 1f\n" \ + " sc.d.rl %1, %z4, %2\n" \ + " bnez %1, 0b\n" \ + " fence rw, rw\n" \ "1:\n" \ : "=&r"(__ret), "=&r"(__rc), \ "+A"(*__ptr) \ |