author    Christoph Hellwig <hch@lst.de>    2023-05-31 09:05:02 +0300
committer David Sterba <dsterba@suse.com>   2023-06-19 14:59:35 +0300
commit    7027f87108ce3d23ea542e1a9a79056530db4a87 (patch)
tree      131040b0a58efbcb5c7807b43d8067944f8e13fd /fs/btrfs
parent    eb34dceace983e304e00d4bf711cec0a603959ac (diff)
btrfs: don't treat zoned writeback as being from an async helper thread
When extent_write_locked_range was originally added, it was only used for
writing back compressed pages from an async helper thread. But it is now
also used for writing back pages on zoned devices, where it is called
directly from the ->writepage context. In this case we want to be able to
pass on the writeback_control instead of creating a new one, and more
importantly want to use all the normal cgroup interaction instead of
potentially deferring writeback to another helper.

Fixes: 898793d992c2 ("btrfs: zoned: write out partially allocated region")
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
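For readers skimming the patch, the following is a condensed sketch of the new calling convention, derived from the diff below rather than taken verbatim from it. The caller now owns the writeback_control, attaches it to the inode for cgroup writeback accounting, and passes it into extent_write_locked_range(); the locally built wbc and the unconditional REQ_BTRFS_CGROUP_PUNT inside that function are gone. The wrapper name below is hypothetical.

    /*
     * Condensed sketch (not part of the patch): how a caller such as
     * submit_uncompressed_range() drives extent_write_locked_range()
     * after this change. The caller builds and attaches the
     * writeback_control; REQ_BTRFS_CGROUP_PUNT is applied inside
     * extent_write_locked_range() only when wbc->no_cgroup_owner is set.
     */
    static int write_locked_range_sketch(struct btrfs_inode *inode,
                                         u64 start, u64 end)
    {
            struct writeback_control wbc = {
                    .sync_mode       = WB_SYNC_ALL,
                    .range_start     = start,
                    .range_end       = end,
                    .no_cgroup_owner = 1,
            };
            int ret;

            /* Associate the wbc with the inode for cgroup writeback accounting. */
            wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
            ret = extent_write_locked_range(&inode->vfs_inode, start, end, &wbc);
            wbc_detach_inode(&wbc);
            return ret;
    }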
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/extent_io.c  20
-rw-r--r--  fs/btrfs/extent_io.h   3
-rw-r--r--  fs/btrfs/inode.c      20
3 files changed, 24 insertions(+), 19 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1da247e753b0..f4d3c56b2900 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2210,7 +2210,8 @@ retry:
* already been ran (aka, ordered extent inserted) and all pages are still
* locked.
*/
-int extent_write_locked_range(struct inode *inode, u64 start, u64 end)
+int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
+ struct writeback_control *wbc)
{
bool found_error = false;
int first_error = 0;
@@ -2220,22 +2221,16 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end)
const u32 sectorsize = fs_info->sectorsize;
loff_t i_size = i_size_read(inode);
u64 cur = start;
- struct writeback_control wbc_writepages = {
- .sync_mode = WB_SYNC_ALL,
- .range_start = start,
- .range_end = end,
- .no_cgroup_owner = 1,
- };
struct btrfs_bio_ctrl bio_ctrl = {
- .wbc = &wbc_writepages,
- /* We're called from an async helper function */
- .opf = REQ_OP_WRITE | REQ_BTRFS_CGROUP_PUNT |
- wbc_to_write_flags(&wbc_writepages),
+ .wbc = wbc,
+ .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
};
+ if (wbc->no_cgroup_owner)
+ bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
+
ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
- wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
while (cur <= end) {
u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
struct page *page;
@@ -2275,7 +2270,6 @@ next_page:
submit_write_bio(&bio_ctrl, found_error ? ret : 0);
- wbc_detach_inode(&wbc_writepages);
if (found_error)
return first_error;
return ret;
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 6723bf3483d9..c5fae3a7d911 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -178,7 +178,8 @@ int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
int btrfs_read_folio(struct file *file, struct folio *folio);
-int extent_write_locked_range(struct inode *inode, u64 start, u64 end);
+int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
+ struct writeback_control *wbc);
int extent_writepages(struct address_space *mapping,
struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a40a6002a198..600086b8195d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1133,6 +1133,12 @@ static int submit_uncompressed_range(struct btrfs_inode *inode,
unsigned long nr_written = 0;
int page_started = 0;
int ret;
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .range_start = start,
+ .range_end = end,
+ .no_cgroup_owner = 1,
+ };
/*
* Call cow_file_range() to run the delalloc range directly, since we
@@ -1162,7 +1168,10 @@ static int submit_uncompressed_range(struct btrfs_inode *inode,
}
/* All pages will be unlocked, including @locked_page */
- return extent_write_locked_range(&inode->vfs_inode, start, end);
+ wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
+ ret = extent_write_locked_range(&inode->vfs_inode, start, end, &wbc);
+ wbc_detach_inode(&wbc);
+ return ret;
}
static int submit_one_async_extent(struct btrfs_inode *inode,
@@ -1815,7 +1824,8 @@ static bool run_delalloc_compressed(struct btrfs_inode *inode,
static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
struct page *locked_page, u64 start,
u64 end, int *page_started,
- unsigned long *nr_written)
+ unsigned long *nr_written,
+ struct writeback_control *wbc)
{
u64 done_offset = end;
int ret;
@@ -1847,8 +1857,8 @@ static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
account_page_redirty(locked_page);
}
locked_page_done = true;
- extent_write_locked_range(&inode->vfs_inode, start, done_offset);
-
+ extent_write_locked_range(&inode->vfs_inode, start, done_offset,
+ wbc);
start = done_offset + 1;
}
@@ -2422,7 +2432,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
if (zoned)
ret = run_delalloc_zoned(inode, locked_page, start, end,
- page_started, nr_written);
+ page_started, nr_written, wbc);
else
ret = cow_file_range(inode, locked_page, start, end,
page_started, nr_written, 1, NULL);