author	Christoph Hellwig <hch@lst.de>	2023-06-28 18:31:30 +0300
committer	David Sterba <dsterba@suse.com>	2023-08-21 15:52:14 +0300
commit	2c73162d649670045415537dc746f7524cda1405 (patch)
tree	5ff4e80739d5f009f2aa18ba2249ddc91cf7f4e9 /fs/btrfs/extent_io.c
parent	0835d1e66e7f22428bbc2ea56cb488a7759b51e2 (diff)
download	linux-2c73162d649670045415537dc746f7524cda1405.tar.xz
btrfs: improve the delalloc_to_write calculation in writepage_delalloc
Currently writepage_delalloc adds to delalloc_to_write in every loop
iteration. That is not only more work than doing it once after the loop,
but can also over-increment the counter due to rounding errors when a new
loop iteration starts with an offset into a page.

Add a new page_start variable instead of recalculating that value over and
over, move the delalloc_to_write calculation out of the loop, use the
DIV_ROUND_UP helper instead of open coding it and remove the pointless
found local variable.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
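To make the rounding issue concrete, the following is a small userspace sketch (not kernel code) contrasting the old per-iteration accumulation with the single post-loop DIV_ROUND_UP() calculation; PAGE_SHIFT, PAGE_SIZE and DIV_ROUND_UP are redefined locally here, and the two delalloc ranges are made-up example values.

	/*
	 * Illustrative sketch only: models the delalloc_to_write accounting,
	 * not the real writepage_delalloc() loop.
	 */
	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		uint64_t page_start = 0;
		/* Two delalloc ranges splitting page 0 at a non-aligned offset. */
		uint64_t ranges[2][2] = {
			{ 0, 2047 },		/* first part of page 0 */
			{ 2048, 8191 },		/* rest of page 0 plus page 1 */
		};
		uint64_t old_count = 0, new_count, delalloc_end = 0;

		for (int i = 0; i < 2; i++) {
			uint64_t delalloc_start = ranges[i][0];

			delalloc_end = ranges[i][1];
			/* Old scheme: open-coded round-up once per iteration. */
			old_count += (delalloc_end - delalloc_start + PAGE_SIZE)
					>> PAGE_SHIFT;
		}
		/* New scheme: one rounded division after the loop. */
		new_count = DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);

		printf("old per-iteration total: %llu pages\n",
		       (unsigned long long)old_count);	/* prints 3 */
		printf("new post-loop total:     %llu pages\n",
		       (unsigned long long)new_count);	/* prints 2 */
		return 0;
	}

With the example ranges, the old formula counts 3 pages even though the delalloc space only spans pages 0 and 1; the post-loop calculation counts 2, which is the over-increment this patch removes.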
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	29
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4981058d3877..148d289f70f0 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1185,8 +1185,10 @@ static inline void contiguous_readpages(struct page *pages[], int nr_pages,
 static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
 		struct page *page, struct writeback_control *wbc)
 {
-	const u64 page_end = page_offset(page) + PAGE_SIZE - 1;
-	u64 delalloc_start = page_offset(page);
+	const u64 page_start = page_offset(page);
+	const u64 page_end = page_start + PAGE_SIZE - 1;
+	u64 delalloc_start = page_start;
+	u64 delalloc_end = page_end;
 	u64 delalloc_to_write = 0;
 	/* How many pages are started by btrfs_run_delalloc_range() */
 	unsigned long nr_written = 0;
@@ -1194,13 +1196,9 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
 	int page_started = 0;
 
 	while (delalloc_start < page_end) {
-		u64 delalloc_end = page_end;
-		bool found;
-
-		found = find_lock_delalloc_range(&inode->vfs_inode, page,
-						 &delalloc_start,
-						 &delalloc_end);
-		if (!found) {
+		delalloc_end = page_end;
+		if (!find_lock_delalloc_range(&inode->vfs_inode, page,
+					      &delalloc_start, &delalloc_end)) {
 			delalloc_start = delalloc_end + 1;
 			continue;
 		}
@@ -1209,14 +1207,15 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
 		if (ret)
 			return ret;
 
-		/*
-		 * delalloc_end is already one less than the total length, so
-		 * we don't subtract one from PAGE_SIZE
-		 */
-		delalloc_to_write += (delalloc_end - delalloc_start +
-				      PAGE_SIZE) >> PAGE_SHIFT;
 		delalloc_start = delalloc_end + 1;
 	}
+
+	/*
+	 * delalloc_end is already one less than the total length, so
+	 * we don't subtract one from PAGE_SIZE
+	 */
+	delalloc_to_write +=
+		DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
 	if (wbc->nr_to_write < delalloc_to_write) {
 		int thresh = 8192;