author     David Sterba <dsterba@suse.com>  2023-03-01 23:47:08 +0300
committer  David Sterba <dsterba@suse.com>  2023-04-17 19:01:17 +0300
commit     0b5485391deff35c6633d7cbff5cf3a8229fbe9e (patch)
tree       ffb1add40aa5cff2b52b3db39e9061bc61dd2b95 /fs/btrfs/locking.c
parent     ce4cf3793e72a4ca2eeebc5db5d04f25b42a59db (diff)
download   linux-0b5485391deff35c6633d7cbff5cf3a8229fbe9e.tar.xz
btrfs: locking: use atomic for DREW lock writers
The DREW lock uses a percpu variable to track the lock counters and for that
it needs to allocate the structure. In btrfs_read_tree_root() or
btrfs_init_fs_root() this may add another error case or require the NOFS
scope protection.

One way is to preallocate the structure, as was suggested in
https://lore.kernel.org/linux-btrfs/20221214021125.28289-1-robbieko@synology.com/

We can avoid the allocation altogether if we don't use the percpu variables
but an atomic for the writer counter. This should not make any difference;
the DREW lock is used for truncate and NOCOW writes along with other IO
operations.

The percpu counter for writers has been there since the original commit
8257b2dc3c1a1057 ("Btrfs: introduce btrfs_{start, end}_nocow_write() for
each subvolume"). The reason could be to avoid hammering the same cacheline
from all the readers, but then the writers do that anyway.

Signed-off-by: David Sterba <dsterba@suse.com>
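To make the effect concrete, here is a minimal sketch of what the change means for the lock structure itself. The field names follow struct btrfs_drew_lock in fs/btrfs/locking.h, but the _old/_new suffixes are only for illustration: percpu_counter_init() allocates memory and can fail, while an atomic_t is plain embedded storage that only needs atomic_set().

/*
 * Illustrative sketch, not part of the patch: the shape of the DREW lock
 * before and after switching the writer counter to an atomic.  The _old
 * and _new names exist only for this example.
 */
#include <linux/atomic.h>
#include <linux/percpu_counter.h>
#include <linux/wait.h>

struct btrfs_drew_lock_old {
	atomic_t readers;
	struct percpu_counter writers;	/* percpu_counter_init() allocates and can fail */
	wait_queue_head_t pending_writers;
	wait_queue_head_t pending_readers;
};

struct btrfs_drew_lock_new {
	atomic_t readers;
	atomic_t writers;		/* plain counter, no allocation, init cannot fail */
	wait_queue_head_t pending_writers;
	wait_queue_head_t pending_readers;
};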
Diffstat (limited to 'fs/btrfs/locking.c')
-rw-r--r--   fs/btrfs/locking.c | 25
1 file changed, 6 insertions(+), 19 deletions(-)
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 870528d87526..3a496b0d3d2b 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -325,24 +325,12 @@ struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root)
* acquire the lock.
*/
-int btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
+void btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
{
- int ret;
-
- ret = percpu_counter_init(&lock->writers, 0, GFP_KERNEL);
- if (ret)
- return ret;
-
atomic_set(&lock->readers, 0);
+ atomic_set(&lock->writers, 0);
init_waitqueue_head(&lock->pending_readers);
init_waitqueue_head(&lock->pending_writers);
-
- return 0;
-}
-
-void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock)
-{
- percpu_counter_destroy(&lock->writers);
}
/* Return true if acquisition is successful, false otherwise */
@@ -351,10 +339,10 @@ bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
if (atomic_read(&lock->readers))
return false;
- percpu_counter_inc(&lock->writers);
+ atomic_inc(&lock->writers);
/* Ensure writers count is updated before we check for pending readers */
- smp_mb();
+ smp_mb__after_atomic();
if (atomic_read(&lock->readers)) {
btrfs_drew_write_unlock(lock);
return false;
@@ -374,7 +362,7 @@ void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
{
- percpu_counter_dec(&lock->writers);
+ atomic_dec(&lock->writers);
cond_wake_up(&lock->pending_readers);
}
@@ -390,8 +378,7 @@ void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
*/
smp_mb__after_atomic();
- wait_event(lock->pending_readers,
- percpu_counter_sum(&lock->writers) == 0);
+ wait_event(lock->pending_readers, atomic_read(&lock->writers) == 0);
}
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock)
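For context on why smp_mb__after_atomic() is sufficient on the writer side, here is a rough sketch of the reader-side counterpart, assumed to mirror btrfs_drew_read_lock() in fs/btrfs/locking.c after this patch (the _sketch name is only for this example). Both sides publish their own counter, issue a full barrier, and then check the other side's counter, so at least one of the two racing paths observes the conflict; with an atomic writer counter the reader's wait condition becomes a plain atomic_read() instead of percpu_counter_sum().

/*
 * Sketch of the reader path that pairs with btrfs_drew_try_write_lock();
 * assumed to mirror btrfs_drew_read_lock() after this change.
 */
static void drew_read_lock_sketch(struct btrfs_drew_lock *lock)
{
	atomic_inc(&lock->readers);		/* publish this reader */

	/*
	 * Order the increment against the writers check below; pairs with
	 * the smp_mb__after_atomic() in the try-write-lock path.
	 */
	smp_mb__after_atomic();

	/* an atomic read replaces the more expensive percpu_counter_sum() */
	wait_event(lock->pending_readers, atomic_read(&lock->writers) == 0);
}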