author     Mark Rutland <mark.rutland@arm.com>        2021-07-13 13:52:50 +0300
committer  Peter Zijlstra <peterz@infradead.org>      2021-07-16 19:46:44 +0300
commit     f3e615b4db1fb7034f1d76dc307b77cc848f040e (patch)
tree       131fa0f8061d0faae81132f964e4e929a1ad4759 /scripts/atomic/fallbacks/fetch_add_unless
parent     47401d94947d507ff9f33fccf490baf47638fb69 (diff)
download   linux-f3e615b4db1fb7034f1d76dc307b77cc848f040e.tar.xz
locking/atomic: remove ARCH_ATOMIC remnants
Now that gen-atomic-fallback.sh is only used to generate the arch_* fallbacks, we don't need to also generate the non-arch_* forms, and can remove the infrastructure this needed. There is no change to any of the generated headers as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210713105253.7615-3-mark.rutland@arm.com
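[Editor's note: for context, here is a sketch of what this template generates once the ${arch} prefix is folded into a literal arch_. With ${atomic} expanded to "atomic" and ${int} to "int", the emitted fallback reads along the following lines; this is an illustrative expansion, not a verbatim copy of the generated header:

    static __always_inline int
    arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
    {
            int c = arch_atomic_read(v);

            do {
                    /* Bail out without writing if @v already holds @u. */
                    if (unlikely(c == u))
                            break;
                    /* try_cmpxchg updates c with the current value on failure,
                     * so each retry re-checks the @u condition. */
            } while (!arch_atomic_try_cmpxchg(v, &c, c + a));

            return c;
    }

The loop only retries when another CPU modifies @v between the read and the compare-exchange, and unlikely() keeps the early-exit path off the hot path.]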
Diffstat (limited to 'scripts/atomic/fallbacks/fetch_add_unless')
-rwxr-xr-x  scripts/atomic/fallbacks/fetch_add_unless  |  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/scripts/atomic/fallbacks/fetch_add_unless b/scripts/atomic/fallbacks/fetch_add_unless
index 0e0b9aef1515..68ce13c8b9da 100755
--- a/scripts/atomic/fallbacks/fetch_add_unless
+++ b/scripts/atomic/fallbacks/fetch_add_unless
@@ -1,6 +1,6 @@
 cat << EOF
 /**
- * ${arch}${atomic}_fetch_add_unless - add unless the number is already a given value
+ * arch_${atomic}_fetch_add_unless - add unless the number is already a given value
  * @v: pointer of type ${atomic}_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -9,14 +9,14 @@ cat << EOF
  * Returns original value of @v
  */
 static __always_inline ${int}
-${arch}${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+arch_${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
-        ${int} c = ${arch}${atomic}_read(v);
+        ${int} c = arch_${atomic}_read(v);
 
         do {
                 if (unlikely(c == u))
                         break;
-        } while (!${arch}${atomic}_try_cmpxchg(v, &c, c + a));
+        } while (!arch_${atomic}_try_cmpxchg(v, &c, c + a));
 
         return c;
 }
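[Editor's note: the same try-cmpxchg pattern can be demonstrated outside the kernel. The sketch below is a hypothetical userspace analogue using C11 <stdatomic.h> — the names fetch_add_unless, v, a, and u are illustrative, not a kernel API. atomic_compare_exchange_weak() reloads c with the currently stored value whenever it fails, so every retry re-checks the == u condition before attempting the add, matching the shape of the generated fallback above:

    #include <stdatomic.h>

    /* Add @a to *v unless *v already equals @u; return the value
     * observed before any update. */
    static int fetch_add_unless(atomic_int *v, int a, int u)
    {
            int c = atomic_load(v);

            do {
                    /* Give up without writing if the forbidden value is seen. */
                    if (c == u)
                            break;
                    /* On failure, c is refreshed with the current value of *v,
                     * so the loop condition and the @u check stay coherent. */
            } while (!atomic_compare_exchange_weak(v, &c, c + a));

            return c;
    }

The weak variant is appropriate here because the surrounding loop already tolerates spurious failures.]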