author		Josef Bacik <josef@toxicpanda.com>	2023-12-15 01:39:38 +0300
committer	David Sterba <dsterba@suse.com>	2023-12-16 01:03:30 +0300
commit		4a565c8069b7578a79d193d277e9c760aacf3e75 (patch)
tree		eb3dd658036ede88fd5804ba0625af0c4650a017
parent		13df3775efcaf412980c45aba2c321479bfc209a (diff)
btrfs: don't double put our subpage reference in alloc_extent_buffer
This fixes a case in "btrfs: refactor alloc_extent_buffer() to
allocate-then-attach method".

We have been seeing panics in the CI for the subpage stuff recently. It
happens on btrfs/187 but could potentially happen anywhere.

In the subpage case, if we race with somebody else inserting the same
extent buffer, the error case will end up calling
detach_extent_buffer_page() on the page twice. This is done first in
the bit

    for (int i = 0; i < attached; i++)
            detach_extent_buffer_page(eb, eb->pages[i]);

and then again in btrfs_release_extent_buffer().

This works fine for !subpage because we're the only one who ever has
ourselves on the private, so when we do the initial
detach_extent_buffer_page() we know we've completely removed it.
However, for subpage the page private could be in use elsewhere, so
this results in a double put on the subpage, which can lead to an
early free.

The fix here is to clear eb->pages[i] for everything we detach. Then
anything still attached to the eb is freed in
btrfs_release_extent_buffer().

Because of this change we must update
btrfs_release_extent_buffer_pages() to not use num_extent_folios(),
because that assumes eb->folios[0] is set properly. Since this function
is only interested in freeing whatever pages the extent buffer still
holds, we can simply iterate over INLINE_EXTENT_BUFFER_PAGES slots.

Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
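For illustration only, here is a small standalone userspace sketch (not the
kernel code: struct folio, the eb_refs field and the helper bodies below are
simplified stand-ins for the real subpage machinery) showing why clearing
eb->folios[i] after the detach in the error path avoids the second reference
drop in the subpage case:

/*
 * Standalone model of the refcounting, NOT the kernel implementation.
 */
#include <assert.h>
#include <stdio.h>

#define INLINE_EXTENT_BUFFER_PAGES 16

struct folio {
	int eb_refs;	/* stand-in for the subpage eb reference count */
};

struct extent_buffer {
	struct folio *folios[INLINE_EXTENT_BUFFER_PAGES];
};

/* Stand-in for detach_extent_buffer_folio() in the subpage case. */
static void detach_extent_buffer_folio(struct extent_buffer *eb,
				       struct folio *folio)
{
	folio->eb_refs--;	/* models btrfs_folio_dec_eb_refs() */
}

/* Stand-in for the release path: drops whatever is still attached. */
static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
{
	for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
		if (!eb->folios[i])
			continue;
		detach_extent_buffer_folio(eb, eb->folios[i]);
		eb->folios[i] = NULL;
	}
}

int main(void)
{
	struct folio shared = { .eb_refs = 1 };
	struct extent_buffer eb = { .folios = { &shared } };
	int attached = 1;

	/* Error path in alloc_extent_buffer(): detach what we attached... */
	for (int i = 0; i < attached; i++) {
		detach_extent_buffer_folio(&eb, eb.folios[i]);
		eb.folios[i] = NULL;	/* the fix: clear the slot */
	}

	/* ...so the later release only sees folios still attached. */
	btrfs_release_extent_buffer_pages(&eb);

	/* Without clearing the slot, eb_refs would now be -1 (double put). */
	assert(shared.eb_refs == 0);
	printf("eb_refs = %d, no double put\n", shared.eb_refs);
	return 0;
}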
-rw-r--r--	fs/btrfs/extent_io.c	22
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index a5c2acd5c8ae..2c69e1f0fa10 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3175,11 +3175,9 @@ static void detach_extent_buffer_folio(struct extent_buffer *eb, struct folio *f
/* Release all pages attached to the extent buffer */
static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
{
- int num_folios = num_extent_folios(eb);
-
ASSERT(!extent_buffer_under_io(eb));
- for (int i = 0; i < num_folios; i++) {
+ for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
struct folio *folio = eb->folios[i];
if (!folio)
@@ -3747,10 +3745,28 @@ again:
out:
WARN_ON(!atomic_dec_and_test(&eb->refs));
+
+ /*
+ * Any attached folios need to be detached before we unlock them. This
+ * is because when we insert our new folios into the mapping we attach
+ * our eb to each folio, and when an inserter fails to insert a folio
+ * it looks up the folio already at that index and grabs the eb
+ * attached to it. We do not want such a lookup to grab this eb, as
+ * we're getting ready to free it. So we have to detach it first and
+ * then unlock it.
+ *
+ * We have to drop our reference and NULL it out here because in the
+ * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb.
+ * Below when we call btrfs_release_extent_buffer() we will call
+ * detach_extent_buffer_folio() on our remaining folios in the !subpage
+ * case. If we left eb->folios[i] populated in the subpage case we'd
+ * double put our reference and be super sad.
+ */
for (int i = 0; i < attached; i++) {
ASSERT(eb->folios[i]);
detach_extent_buffer_folio(eb, eb->folios[i]);
unlock_page(folio_page(eb->folios[i], 0));
+ folio_put(eb->folios[i]);
+ eb->folios[i] = NULL;
}
/*
* Now all pages of that extent buffer is unmapped, set UNMAPPED flag,