author    David Howells <dhowells@redhat.com>  2024-01-06 01:03:58 +0300
committer David Howells <dhowells@redhat.com>  2024-01-06 02:13:48 +0300
commit    807c6d09cc99cbdf9933edfadcbaa8f0b856848d (patch)
tree      4ac3fb1575ab147903339b51fd201f401fc63ccc /fs/netfs
parent    92a714d727ec9e7ccfcc7432d348aba730145914 (diff)
download  linux-807c6d09cc99cbdf9933edfadcbaa8f0b856848d.tar.xz
netfs: Fix the loop that unmarks folios after writing to the cache
In the loop in netfs_rreq_unmark_after_write() that removes the PG_fscache
mark from folios after they've been written to the cache, a multipage folio
can get split as soon as we remove the mark from it - and then we might see
a fragment of that folio again. Guard against this by advancing the
'unlocked' tracker to the index of the last page in the folio, avoiding a
double removal of the PG_fscache mark.

Reported-by: Marc Dionne <marc.dionne@auristor.com>
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Matthew Wilcox <willy@infradead.org>
cc: linux-afs@lists.infradead.org
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
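To make the race concrete, here is a minimal, self-contained userspace C
sketch of the pattern the fix relies on. It is illustrative only, not the
kernel API: struct fake_folio, fake_next_index() and the page indices below
are invented stand-ins for folios, folio_next_index() and the xarray walk
in netfs_rreq_unmark_after_write().

#include <stdbool.h>
#include <stdio.h>

struct fake_folio {
	unsigned long index;    /* first page index in the folio */
	unsigned long npages;   /* number of pages the folio spans */
};

/* Analogue of folio_next_index(): index one past the folio's last page. */
static unsigned long fake_next_index(const struct fake_folio *f)
{
	return f->index + f->npages;
}

int main(void)
{
	/*
	 * Simulate the walk seeing a 4-page folio at index 0, which then
	 * gets split; the walk later yields its tail fragment at index 2,
	 * followed by a genuinely new single-page folio at index 4.
	 */
	struct fake_folio seen[] = {
		{ .index = 0, .npages = 4 },  /* whole folio, handled  */
		{ .index = 2, .npages = 2 },  /* fragment after split  */
		{ .index = 4, .npages = 1 },  /* genuinely new folio   */
	};
	unsigned long unlocked = 0;
	bool have_unlocked = false;

	for (size_t i = 0; i < sizeof(seen) / sizeof(seen[0]); i++) {
		struct fake_folio *folio = &seen[i];

		/* Skip fragments of a folio we already unmarked. */
		if (have_unlocked && folio->index <= unlocked)
			continue;

		/*
		 * The fix: record the LAST page of the folio, so a later
		 * fragment starting anywhere inside it compares <= unlocked.
		 * The buggy version stored folio->index (the first page).
		 */
		unlocked = fake_next_index(folio) - 1;
		have_unlocked = true;
		printf("unmark pages %lu..%lu\n",
		       folio->index, fake_next_index(folio) - 1);
	}
	return 0;
}

Run as written, this prints "unmark pages 0..3" and "unmark pages 4..4";
with the buggy assignment (unlocked = folio->index), the fragment at
index 2 would fail the <= check against 0 and be unmarked a second time.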
Diffstat (limited to 'fs/netfs')
-rw-r--r--  fs/netfs/buffered_write.c  1
-rw-r--r--  fs/netfs/io.c              2
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index 6cd8f7422e9a..0b2b7a60dabc 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -698,6 +698,7 @@ static void netfs_pages_written_back(struct netfs_io_request *wreq)
 end_wb:
 	if (folio_test_fscache(folio))
 		folio_end_fscache(folio);
+	xas_advance(&xas, folio_next_index(folio) - 1);
 	folio_end_writeback(folio);
 }
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
index 5b5af96cd4b9..4309edf33862 100644
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -126,7 +126,7 @@ static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
 		 */
 		if (have_unlocked && folio_index(folio) <= unlocked)
 			continue;
-		unlocked = folio_index(folio);
+		unlocked = folio_next_index(folio) - 1;
 		trace_netfs_folio(folio, netfs_folio_trace_end_copy);
 		folio_end_fscache(folio);
 		have_unlocked = true;
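A note on the choice of sentinel: folio_next_index() returns the index one
past the folio's last page, so folio_next_index(folio) - 1 is the last page
the folio covers. Any fragment produced by a split starts at an index no
greater than that, so the 'folio_index(folio) <= unlocked' check above now
skips such fragments instead of calling folio_end_fscache() on them twice.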