-rw-r--r--  fs/btrfs/extent_io.c  29
-rw-r--r--  fs/btrfs/extent_io.h   3
-rw-r--r--  fs/btrfs/inode.c      43
3 files changed, 15 insertions, 60 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 08fb26d58172..bcf8244d8e1a 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -181,22 +181,6 @@ void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
}
}
-void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
-{
- struct address_space *mapping = inode->i_mapping;
- unsigned long index = start >> PAGE_SHIFT;
- unsigned long end_index = end >> PAGE_SHIFT;
- struct folio *folio;
-
- while (index <= end_index) {
- folio = filemap_get_folio(mapping, index);
- filemap_dirty_folio(mapping, folio);
- folio_account_redirty(folio);
- index += folio_nr_pages(folio);
- folio_put(folio);
- }
-}
-
static void process_one_page(struct btrfs_fs_info *fs_info,
struct page *page, struct page *locked_page,
unsigned long page_ops, u64 start, u64 end)
@@ -2178,7 +2162,7 @@ retry:
* locked.
*/
void extent_write_locked_range(struct inode *inode, u64 start, u64 end,
- struct writeback_control *wbc)
+ struct writeback_control *wbc, bool pages_dirty)
{
bool found_error = false;
int ret = 0;
@@ -2204,14 +2188,11 @@ void extent_write_locked_range(struct inode *inode, u64 start, u64 end,
int nr = 0;
page = find_get_page(mapping, cur >> PAGE_SHIFT);
- /*
- * All pages in the range are locked since
- * btrfs_run_delalloc_range(), thus there is no way to clear
- * the page dirty flag.
- */
ASSERT(PageLocked(page));
- ASSERT(PageDirty(page));
- clear_page_dirty_for_io(page);
+ if (pages_dirty) {
+ ASSERT(PageDirty(page));
+ clear_page_dirty_for_io(page);
+ }
ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl,
i_size, &nr);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 0312022bbf4b..2678906e87c5 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -178,7 +178,7 @@ int try_release_extent_buffer(struct page *page);
int btrfs_read_folio(struct file *file, struct folio *folio);
void extent_write_locked_range(struct inode *inode, u64 start, u64 end,
- struct writeback_control *wbc);
+ struct writeback_control *wbc, bool pages_dirty);
int extent_writepages(struct address_space *mapping,
struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
@@ -265,7 +265,6 @@ void set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
-void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
struct page *locked_page,
u32 bits_to_clear, unsigned long page_ops);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index cda511d76084..7447f3b44cd2 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -846,11 +846,17 @@ static void compress_file_range(struct btrfs_work *work)
unsigned int poff;
int i;
int compress_type = fs_info->compress_type;
- int redirty = 0;
inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);
/*
+ * We need to call clear_page_dirty_for_io on each page in the range.
+ * Otherwise applications with the file mmap'd can wander in and change
+ * the page contents while we are compressing them.
+ */
+ extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
+
+ /*
* We need to save i_size before now because it could change in between
* us evaluating the size and assigning it. This is because we lock and
* unlock the page in truncate and fallocate, and then modify the i_size
@@ -929,22 +935,6 @@ again:
else if (inode->prop_compress)
compress_type = inode->prop_compress;
- /*
- * We need to call clear_page_dirty_for_io on each page in the range.
- * Otherwise applications with the file mmap'd can wander in and change
- * the page contents while we are compressing them.
- *
- * If the compression fails for any reason, we set the pages dirty again
- * later on.
- *
- * Note that the remaining part is redirtied, the start pointer has
- * moved, the end is the original one.
- */
- if (!redirty) {
- extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
- redirty = 1;
- }
-
/* Compression level is applied here. */
ret = btrfs_compress_pages(compress_type | (fs_info->compress_level << 4),
mapping, start, pages, &nr_pages, &total_in,
@@ -1040,21 +1030,6 @@ mark_incompressible:
if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
inode->flags |= BTRFS_INODE_NOCOMPRESS;
cleanup_and_bail_uncompressed:
- /*
- * No compression, but we still need to write the pages in the file
- * we've been given so far. redirty the locked page if it corresponds
- * to our extent and set things up for the async work queue to run
- * cow_file_range to do the normal delalloc dance.
- */
- if (async_chunk->locked_page &&
- (page_offset(async_chunk->locked_page) >= start &&
- page_offset(async_chunk->locked_page)) <= end) {
- __set_page_dirty_nobuffers(async_chunk->locked_page);
- /* unlocked later on in the async handlers */
- }
-
- if (redirty)
- extent_range_redirty_for_io(&inode->vfs_inode, start, end);
add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
BTRFS_COMPRESS_NONE);
free_pages:
@@ -1130,7 +1105,7 @@ static void submit_uncompressed_range(struct btrfs_inode *inode,
/* All pages will be unlocked, including @locked_page */
wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
- extent_write_locked_range(&inode->vfs_inode, start, end, &wbc);
+ extent_write_locked_range(&inode->vfs_inode, start, end, &wbc, false);
wbc_detach_inode(&wbc);
}
@@ -1754,7 +1729,7 @@ static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
}
locked_page_done = true;
extent_write_locked_range(&inode->vfs_inode, start, done_offset,
- wbc);
+ wbc, true);
start = done_offset + 1;
}