author		Pavel Begunkov <asml.silence@gmail.com>		2020-11-22 18:35:47 +0300
committer	Jens Axboe <axboe@kernel.dk>			2020-12-08 03:12:49 +0300
commit		c3250c8d2451ffbea14ba95164c59edd943ee4be (patch)
tree		d15f81f25f233c6576b615fdf44bdf77c3bc4696 /lib/sbitmap.c
parent		661d4f55a79483aee4970a76e3bd9d4cdc74ac79 (diff)
sbitmap: replace CAS with atomic and
sbitmap_deferred_clear() does a CAS loop to propagate cleared bits; replace it with an equivalent atomic bitwise AND-NOT. That's slightly faster and makes the operation wait-free instead of lock-free as before.

The atomic can be relaxed (i.e. barrier-less) because the following sbitmap_get*() calls deal with synchronisation, see the comments in sbitmap_queue_clear().

It's ok to cast to atomic_long_t, that's what bitops/lock.h does.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
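[Editor's note] To illustrate the equivalence the commit message asserts, here is a minimal userspace sketch using C11 atomics rather than the kernel's cmpxchg()/atomic_long_andnot() API. The function names and the standalone 'word' variable are hypothetical stand-ins for map->word; this is a sketch of the technique, not the kernel code.

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_ulong word = 0xffUL;	/* stand-in for map->word */

	/* Old approach: CAS loop. Lock-free: a racing thread may have to retry. */
	static void clear_bits_cas(unsigned long mask)
	{
		unsigned long val = atomic_load_explicit(&word, memory_order_relaxed);

		/* on failure, 'val' is refreshed with the current value and we retry */
		while (!atomic_compare_exchange_weak_explicit(&word, &val,
							      val & ~mask,
							      memory_order_relaxed,
							      memory_order_relaxed))
			;
	}

	/* New approach: one relaxed atomic AND. Wait-free: never retries. */
	static void clear_bits_and(unsigned long mask)
	{
		atomic_fetch_and_explicit(&word, ~mask, memory_order_relaxed);
	}

	int main(void)
	{
		clear_bits_cas(0x0fUL);		/* word: 0xff -> 0xf0 */
		clear_bits_and(0x30UL);		/* word: 0xf0 -> 0xc0 */
		printf("word = 0x%lx\n", atomic_load(&word));
		return 0;
	}

Both functions clear exactly the bits in 'mask'; the difference is that the fetch-and completes in a bounded number of steps regardless of contention, which is the wait-free property the commit message refers to.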
Diffstat (limited to 'lib/sbitmap.c')
-rw-r--r--	lib/sbitmap.c	8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 4fd877048ba8..c18b518a16ba 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -14,7 +14,7 @@
  */
 static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
 {
-	unsigned long mask, val;
+	unsigned long mask;
 
 	if (!READ_ONCE(map->cleared))
 		return false;
@@ -27,10 +27,8 @@ static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
 	/*
 	 * Now clear the masked bits in our free word
 	 */
-	do {
-		val = map->word;
-	} while (cmpxchg(&map->word, val, val & ~mask) != val);
-
+	atomic_long_andnot(mask, (atomic_long_t *)&map->word);
+	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
 	return true;
 }
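[Editor's note] The BUILD_BUG_ON() added above guards the cast of the plain 'unsigned long word' field to atomic_long_t, which is only sound while the two types have the same size (the same trick bitops/lock.h relies on, per the commit message). A userspace analogue of that compile-time check, with a hypothetical word_holder struct standing in for struct sbitmap_word:

	#include <stdatomic.h>

	struct word_holder {	/* hypothetical stand-in for struct sbitmap_word */
		unsigned long word;
	};

	/* Analogue of the BUILD_BUG_ON(): refuse to build if the cast is unsound. */
	_Static_assert(sizeof(atomic_ulong) == sizeof(unsigned long),
		       "atomic type must match the plain word in size");

	static void clear_masked(struct word_holder *w, unsigned long mask)
	{
		/*
		 * Same shape as the kernel's atomic_long_andnot() on &map->word.
		 * Note the pointer cast is a kernel-style trick, not something
		 * ISO C guarantees for arbitrary atomic implementations.
		 */
		atomic_fetch_and_explicit((atomic_ulong *)&w->word, ~mask,
					  memory_order_relaxed);
	}

The size assertion costs nothing at runtime; it simply turns a silent representation mismatch into a build failure.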