author		Qu Wenruo <wqu@suse.com>	2023-07-15 14:08:33 +0300
committer	David Sterba <dsterba@suse.com>	2023-08-21 15:52:17 +0300
commit		13840f3f2837b9a00f06d2d8d93dbdc6b2b6532e (patch)
tree		f09ad0e06f59b6323d05fca6d71d40487f01dd45
parent		682a0bc5573f3c10a435fb9650c856bb325d2733 (diff)
btrfs: refactor main loop in memcpy_extent_buffer()
[BACKGROUND]
Currently memcpy_extent_buffer() does a loop where it stops at any page boundary inside [dst_offset, dst_offset + len) or [src_offset, src_offset + len).

This is mostly to allow us to do copy_pages(), but if we're going to use folios we will need to handle either multiple pages (the old behavior) or a single folio (the new optimization). The current code would be a burden for future changes.

[ENHANCEMENT]
There is a hidden pitfall in the naming of memcpy_extent_buffer(): unlike regular memcpy(), this function can handle overlapping ranges.

So here we extract write_extent_buffer() into a new internal helper, __write_extent_buffer(), and add a new parameter @use_memmove to indicate whether we should use memmove() or regular memcpy().

Now we can use __write_extent_buffer() to handle writing into the dst range, with proper overlap detection.

This slightly changes the chance of calling memmove(): since the split now only happens at the source range page boundaries, the memcpy()/memmove() range can be slightly larger than with the old code, thus slightly increasing the chance that we call memmove() instead of memcpy().

Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
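For reference, the decision the refactor centralizes is the standard overlap test behind memmove(): two ranges [src, src + len) and [dst, dst + len) in the same buffer overlap iff src < dst + len and dst < src + len. The sketch below is a minimal user-space illustration of that choice only; ranges_overlap() and copy_within() are hypothetical stand-ins, not the kernel's areas_overlap() or __write_extent_buffer().

/*
 * Illustrative user-space model, not kernel code: pick memmove() over
 * memcpy() when the source and destination ranges overlap.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool ranges_overlap(unsigned long src, unsigned long dst,
			   unsigned long len)
{
	/* [src, src + len) and [dst, dst + len) intersect. */
	return src < dst + len && dst < src + len;
}

static void copy_within(char *buf, unsigned long dst, unsigned long src,
			unsigned long len)
{
	/* memmove() is required only when the two ranges overlap. */
	if (ranges_overlap(src, dst, len))
		memmove(buf + dst, buf + src, len);
	else
		memcpy(buf + dst, buf + src, len);
}

int main(void)
{
	char buf[32] = "abcdefghijklmnop";

	copy_within(buf, 4, 0, 8);	/* overlapping: memmove() path */
	copy_within(buf, 20, 0, 8);	/* disjoint: memcpy() path */
	printf("%s\n", buf);
	return 0;
}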
-rw-r--r--	fs/btrfs/extent_io.c	58
1 file changed, 31 insertions(+), 27 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 3e30333a4c6b..0c4a305aa5cb 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4129,8 +4129,9 @@ static void assert_eb_page_uptodate(const struct extent_buffer *eb,
}
}
-void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
- unsigned long start, unsigned long len)
+static void __write_extent_buffer(const struct extent_buffer *eb,
+ const void *srcv, unsigned long start,
+ unsigned long len, bool use_memmove)
{
size_t cur;
size_t offset;
@@ -4138,6 +4139,8 @@ void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
char *kaddr;
char *src = (char *)srcv;
unsigned long i = get_eb_page_index(start);
+ /* For unmapped (dummy) ebs, no need to check their uptodate status. */
+ const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags));
@@ -4148,11 +4151,15 @@ void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
while (len > 0) {
page = eb->pages[i];
- assert_eb_page_uptodate(eb, page);
+ if (check_uptodate)
+ assert_eb_page_uptodate(eb, page);
cur = min(len, PAGE_SIZE - offset);
kaddr = page_address(page);
- memcpy(kaddr + offset, src, cur);
+ if (use_memmove)
+ memmove(kaddr + offset, src, cur);
+ else
+ memcpy(kaddr + offset, src, cur);
src += cur;
len -= cur;
@@ -4161,6 +4168,12 @@ void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
}
}
+void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
+ unsigned long start, unsigned long len)
+{
+ return __write_extent_buffer(eb, srcv, start, len, false);
+}
+
static void memset_extent_buffer(const struct extent_buffer *eb, int c,
unsigned long start, unsigned long len)
{
@@ -4409,34 +4422,25 @@ void memcpy_extent_buffer(const struct extent_buffer *dst,
unsigned long dst_offset, unsigned long src_offset,
unsigned long len)
{
- size_t cur;
- size_t dst_off_in_page;
- size_t src_off_in_page;
- unsigned long dst_i;
- unsigned long src_i;
+ unsigned long cur_off = 0;
if (check_eb_range(dst, dst_offset, len) ||
check_eb_range(dst, src_offset, len))
return;
- while (len > 0) {
- dst_off_in_page = get_eb_offset_in_page(dst, dst_offset);
- src_off_in_page = get_eb_offset_in_page(dst, src_offset);
-
- dst_i = get_eb_page_index(dst_offset);
- src_i = get_eb_page_index(src_offset);
-
- cur = min(len, (unsigned long)(PAGE_SIZE -
- src_off_in_page));
- cur = min_t(unsigned long, cur,
- (unsigned long)(PAGE_SIZE - dst_off_in_page));
-
- copy_pages(dst->pages[dst_i], dst->pages[src_i],
- dst_off_in_page, src_off_in_page, cur);
-
- src_offset += cur;
- dst_offset += cur;
- len -= cur;
+ while (cur_off < len) {
+ unsigned long cur_src = cur_off + src_offset;
+ unsigned long pg_index = get_eb_page_index(cur_src);
+ unsigned long pg_off = get_eb_offset_in_page(dst, cur_src);
+ unsigned long cur_len = min(src_offset + len - cur_src,
+ PAGE_SIZE - pg_off);
+ void *src_addr = page_address(dst->pages[pg_index]) + pg_off;
+ const bool use_memmove = areas_overlap(src_offset + cur_off,
+ dst_offset + cur_off, cur_len);
+
+ __write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
+ use_memmove);
+ cur_off += cur_len;
}
}
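As a rough illustration of the new chunking, the user-space sketch below is a model under stated assumptions (PAGE_SIZE hard-coded to 4096, extent buffer offsets treated as plain buffer offsets), not the kernel code. It shows how the rewritten loop cuts a copy only at source page boundaries; each chunk is then handed to a single write, and the memcpy()/memmove() choice is made once per chunk, which is why the ranges checked for overlap can be slightly larger than before.

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* Cut [src_offset, src_offset + len) into chunks at source page boundaries. */
static void show_chunks(unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	unsigned long cur_off = 0;

	while (cur_off < len) {
		unsigned long cur_src = src_offset + cur_off;
		unsigned long pg_off = cur_src & (PAGE_SIZE - 1);
		unsigned long cur_len = min_ul(len - cur_off, PAGE_SIZE - pg_off);

		printf("chunk: src %lu -> dst %lu, len %lu\n",
		       cur_src, dst_offset + cur_off, cur_len);
		cur_off += cur_len;
	}
}

int main(void)
{
	/*
	 * The source range [4000, 4300) crosses a page boundary at 4096,
	 * so the copy splits into a 96-byte and a 204-byte chunk; the
	 * destination offsets alone no longer force extra splits.
	 */
	show_chunks(100, 4000, 300);
	return 0;
}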