author     linke li <lilinke99@qq.com>	2024-04-26 13:34:44 +0300
committer  Jens Axboe <axboe@kernel.dk>	2024-04-26 16:40:28 +0300
commit     6ad0d7e0f4b68f87a98ea2b239123b7d865df86b (patch)
tree       d8ac274dd632519dc9f9694cd631024cfb3eeaa7 /lib
parent     07d1b99825f40f9c0d93e6b99d79a08d0717bac1 (diff)
download   linux-6ad0d7e0f4b68f87a98ea2b239123b7d865df86b.tar.xz
sbitmap: use READ_ONCE to access map->word
In __sbitmap_queue_get_batch(), map->word is read several times and updated atomically using atomic_long_try_cmpxchg(), but the first two reads of map->word are not protected (they are plain, unmarked accesses). This patch moves the statement val = READ_ONCE(map->word) forward, eliminating the unprotected accesses to map->word within the function. It is aimed at reducing the number of benign races reported by KCSAN in order to focus future debugging effort on harmful races.

Signed-off-by: linke li <lilinke99@qq.com>
Link: https://lore.kernel.org/r/tencent_0B517C25E519D3D002194E8445E86C04AD0A@qq.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
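The idea generalizes beyond sbitmap: read the shared word once with a marked load, then let the compare-and-swap loop reuse (and, on failure, refresh) that snapshot instead of issuing further plain reads. Below is a minimal userspace sketch of that pattern using C11 atomics; the names word, mask and claim_bits are invented for illustration, and the kernel code itself uses READ_ONCE() and atomic_long_try_cmpxchg() rather than <stdatomic.h>.

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned long word;	/* shared bitmap word */

/* Try to claim every bit in @mask; returns true if we set them all. */
static bool claim_bits(unsigned long mask)
{
	/* One marked load; all later checks reuse this snapshot. */
	unsigned long val = atomic_load_explicit(&word, memory_order_relaxed);

	for (;;) {
		if (val & mask)		/* a requested bit is already taken */
			return false;
		/*
		 * On failure, compare_exchange writes the current value of
		 * the word back into val, so the retry does not need any
		 * additional unmarked read of the shared variable.
		 */
		if (atomic_compare_exchange_weak(&word, &val, val | mask))
			return true;
	}
}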
Diffstat (limited to 'lib')
-rw-r--r--	lib/sbitmap.c	8
1 file changed, 4 insertions, 4 deletions
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 92c6b1fd8989..1e453f825c05 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -494,18 +494,18 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
 		struct sbitmap_word *map = &sb->map[index];
 		unsigned long get_mask;
 		unsigned int map_depth = __map_depth(sb, index);
+		unsigned long val;
 
 		sbitmap_deferred_clear(map);
-		if (map->word == (1UL << (map_depth - 1)) - 1)
+		val = READ_ONCE(map->word);
+		if (val == (1UL << (map_depth - 1)) - 1)
 			goto next;
 
-		nr = find_first_zero_bit(&map->word, map_depth);
+		nr = find_first_zero_bit(&val, map_depth);
 		if (nr + nr_tags <= map_depth) {
 			atomic_long_t *ptr = (atomic_long_t *) &map->word;
-			unsigned long val;
 
 			get_mask = ((1UL << nr_tags) - 1) << nr;
-			val = READ_ONCE(map->word);
 			while (!atomic_long_try_cmpxchg(ptr, &val,
 						get_mask | val))
 				;