author    David Howells <dhowells@redhat.com>    2020-10-30 13:01:09 +0300
committer David Howells <dhowells@redhat.com>    2021-04-23 12:17:27 +0300
commit    810caa3e6708ba234fc12591d84d4b46f9f05d72 (patch)
tree      e46ec5b43da1d2d674bcb5873814b7aba604d33b /fs/afs
parent    630f5dda8442ca0bbbc20ab0140c5a3db34b486e (diff)
afs: Extract writeback extension into its own function
Extract writeback extension into its own function to break up the
writeback function a bit.

Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/160588538471.3465195.782513375683399583.stgit@warthog.procyon.org.uk/ # rfc
Link: https://lore.kernel.org/r/161118154610.1232039.1765365632920504822.stgit@warthog.procyon.org.uk/ # rfc
Link: https://lore.kernel.org/r/161161050546.2537118.2202554806419189453.stgit@warthog.procyon.org.uk/ # v2
Link: https://lore.kernel.org/r/161340414102.1303470.9078891484034668985.stgit@warthog.procyon.org.uk/ # v3
Link: https://lore.kernel.org/r/161539558417.286939.2879469588895925399.stgit@warthog.procyon.org.uk/ # v4
Link: https://lore.kernel.org/r/161653813972.2770958.12671731209438112378.stgit@warthog.procyon.org.uk/ # v5
Link: https://lore.kernel.org/r/161789097132.6155.4916609419912731964.stgit@warthog.procyon.org.uk/ # v6
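The change itself is a plain extract-function refactor: loop state that the
caller still needs afterwards (count, offset, to) is passed into
afs_extend_writeback() by pointer and written back before it returns. The
userspace sketch below illustrates that in/out-parameter shape only; the names
extend_region() and dirty[] are hypothetical stand-ins, not the kernel code.

#include <stdio.h>

/* Hypothetical analogue of afs_extend_writeback(): scan forward from
 * start, fold contiguous dirty entries into the region, and report the
 * updated state back through pointer out-parameters, just as the kernel
 * helper does with *_count, *_offset and *_to. */
static void extend_region(const int *dirty, size_t len, size_t start,
                          unsigned long *_count, unsigned *_to)
{
        unsigned long count = *_count;
        unsigned to = *_to;

        for (size_t i = start; i < len && dirty[i]; i++) {
                count++;          /* one more page joins the writeback */
                to = 4096;        /* pretend each extra page is fully dirty */
        }

        *_count = count;          /* hand the results back to the caller */
        *_to = to;
}

int main(void)
{
        int dirty[] = { 1, 1, 1, 0, 1 };
        unsigned long count = 1;  /* the locked primary page */
        unsigned to = 512;

        extend_region(dirty, 5, 1, &count, &to);
        printf("count=%lu to=%u\n", count, to);  /* count=3 to=4096 */
        return 0;
}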
Diffstat (limited to 'fs/afs')
-rw-r--r--  fs/afs/write.c | 109
1 file changed, 67 insertions(+), 42 deletions(-)
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 1b8cabf5ac92..4ccd2c263983 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -490,47 +490,25 @@ try_next_key:
}
/*
- * Synchronously write back the locked page and any subsequent non-locked dirty
- * pages.
+ * Extend the region to be written back to include subsequent contiguously
+ * dirty pages if possible, but don't sleep while doing so.
+ *
+ * If this page holds new content, then we can include filler zeros in the
+ * writeback.
*/
-static int afs_write_back_from_locked_page(struct address_space *mapping,
- struct writeback_control *wbc,
- struct page *primary_page,
- pgoff_t final_page)
+static void afs_extend_writeback(struct address_space *mapping,
+ struct afs_vnode *vnode,
+ long *_count,
+ pgoff_t start,
+ pgoff_t final_page,
+ unsigned *_offset,
+ unsigned *_to,
+ bool new_content)
{
- struct afs_vnode *vnode = AFS_FS_I(mapping->host);
- struct iov_iter iter;
struct page *pages[8], *page;
- unsigned long count, priv;
- unsigned n, offset, to, f, t;
- pgoff_t start, first, last;
- loff_t i_size, pos, end;
- int loop, ret;
-
- _enter(",%lx", primary_page->index);
-
- count = 1;
- if (test_set_page_writeback(primary_page))
- BUG();
-
- /* Find all consecutive lockable dirty pages that have contiguous
- * written regions, stopping when we find a page that is not
- * immediately lockable, is not dirty or is missing, or we reach the
- * end of the range.
- */
- start = primary_page->index;
- priv = page_private(primary_page);
- offset = afs_page_dirty_from(primary_page, priv);
- to = afs_page_dirty_to(primary_page, priv);
- trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page);
-
- WARN_ON(offset == to);
- if (offset == to)
- trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page);
-
- if (start >= final_page ||
- (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
- goto no_more;
+ unsigned long count = *_count, priv;
+ unsigned offset = *_offset, to = *_to, n, f, t;
+ int loop;
start++;
do {
@@ -551,8 +529,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
for (loop = 0; loop < n; loop++) {
page = pages[loop];
- if (to != PAGE_SIZE &&
- !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
+ if (to != PAGE_SIZE && !new_content)
break;
if (page->index > final_page)
break;
@@ -566,8 +543,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
priv = page_private(page);
f = afs_page_dirty_from(page, priv);
t = afs_page_dirty_to(page, priv);
- if (f != 0 &&
- !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
+ if (f != 0 && !new_content) {
unlock_page(page);
break;
}
@@ -593,6 +569,55 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
} while (start <= final_page && count < 65536);
no_more:
+ *_count = count;
+ *_offset = offset;
+ *_to = to;
+}
+
+/*
+ * Synchronously write back the locked page and any subsequent non-locked dirty
+ * pages.
+ */
+static int afs_write_back_from_locked_page(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct page *primary_page,
+ pgoff_t final_page)
+{
+ struct afs_vnode *vnode = AFS_FS_I(mapping->host);
+ struct iov_iter iter;
+ unsigned long count, priv;
+ unsigned offset, to;
+ pgoff_t start, first, last;
+ loff_t i_size, pos, end;
+ bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
+ int ret;
+
+ _enter(",%lx", primary_page->index);
+
+ count = 1;
+ if (test_set_page_writeback(primary_page))
+ BUG();
+
+ /* Find all consecutive lockable dirty pages that have contiguous
+ * written regions, stopping when we find a page that is not
+ * immediately lockable, is not dirty or is missing, or we reach the
+ * end of the range.
+ */
+ start = primary_page->index;
+ priv = page_private(primary_page);
+ offset = afs_page_dirty_from(primary_page, priv);
+ to = afs_page_dirty_to(primary_page, priv);
+ trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page);
+
+ WARN_ON(offset == to);
+ if (offset == to)
+ trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page);
+
+ if (start < final_page &&
+ (to == PAGE_SIZE || new_content))
+ afs_extend_writeback(mapping, vnode, &count, start, final_page,
+ &offset, &to, new_content);
+
/* We now have a contiguous set of dirty pages, each with writeback
* set; the first page is still locked at this point, but all the rest
* have been unlocked.
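The guard visible near the end of the hunk above deserves a note:
afs_extend_writeback() is only called when the primary page is dirty all the
way to its end (to == PAGE_SIZE) or the vnode is flagged as holding all-new
content, since only then can filler zeros safely be included in the writeback.
A self-contained restatement of that predicate, using the hypothetical helper
name may_extend():

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Hypothetical restatement of the guard in
 * afs_write_back_from_locked_page(): extending past the primary page
 * only makes sense when its dirty region runs to the end of the page,
 * or when the file is all new content (so zero-fill of any gap between
 * dirty regions is harmless). */
static bool may_extend(unsigned long start, unsigned long final_page,
                       unsigned to, bool new_content)
{
        return start < final_page && (to == PAGE_SIZE || new_content);
}

int main(void)
{
        printf("%d\n", may_extend(0, 10, PAGE_SIZE, false)); /* 1: dirty to page end */
        printf("%d\n", may_extend(0, 10, 100, true));        /* 1: new content */
        printf("%d\n", may_extend(0, 10, 100, false));       /* 0: partial dirty, old data */
        return 0;
}

Note too that the caller reads AFS_VNODE_NEW_CONTENT once into a bool and
passes it down, which is why the extracted helper's inner loop in the diff can
test new_content instead of calling test_bit() on vnode->flags per page.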