path: root/fs/btrfs/ordered-data.h
author     Josef Bacik <josef@toxicpanda.com>  2022-09-30 23:45:10 +0300
committer  David Sterba <dsterba@suse.com>     2022-12-05 20:00:36 +0300
commit     632ddfa2131f0fea1831bc1f4b28c68faa779156 (patch)
tree       3bfa0fb94e69d7dbeafc3eca3dd1273c12e18861 /fs/btrfs/ordered-data.h
parent     83ae4133ac9410ac6a57136e464d498dc66200cf (diff)
btrfs: use cached_state for btrfs_check_nocow_lock
Now that try_lock_extent() takes a cached_state, plumb the cached_state
through btrfs_try_lock_ordered_range() and then use a cached_state in
btrfs_check_nocow_lock everywhere to avoid extra tree searches on the
extent_io_tree.

Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs/ordered-data.h')
-rw-r--r--  fs/btrfs/ordered-data.h | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index f59f2dbdb25e..89f82b78f590 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -206,7 +206,8 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
u64 end,
struct extent_state **cached_state);
-bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end);
+bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
+ struct extent_state **cached_state);
int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
u64 post);
int __init ordered_data_init(void);
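
The hunk above only shows the new prototype. As a rough illustration, a nowait caller in the spirit of btrfs_check_nocow_lock() might thread one cached_state through the try-lock and the final unlock as sketched below. This is a minimal sketch, not the actual fs/btrfs/file.c change: the example_nocow_try_lock() name and body are hypothetical, and it assumes the code is built inside fs/btrfs/ with the helpers that already take a struct extent_state ** (try_lock_extent()/unlock_extent()).

/*
 * Illustrative sketch only, not the actual fs/btrfs/file.c code: a nowait
 * caller threads a single cached_state through the new
 * btrfs_try_lock_ordered_range() prototype and the final unlock_extent(),
 * so the extent_io_tree is not searched from the root a second time.
 */
#include "btrfs_inode.h"
#include "extent-io-tree.h"
#include "ordered-data.h"

static int example_nocow_try_lock(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct extent_state *cached_state = NULL;

	/*
	 * On success the range [start, end] stays locked and cached_state
	 * points at the extent_state that the try-lock found or inserted.
	 */
	if (!btrfs_try_lock_ordered_range(inode, start, end, &cached_state))
		return -EAGAIN;

	/* ... nocow checks against the locked range would go here ... */

	/*
	 * Handing the cached_state back lets the unlock start from the
	 * cached node instead of walking the io_tree again.
	 */
	unlock_extent(&inode->io_tree, start, end, &cached_state);
	return 0;
}

The point of the plumbing is that the extent_state found while taking the lock is remembered in cached_state, so later operations on the same range (here the unlock) can skip the rb-tree search in the extent_io_tree.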