author      Qu Wenruo <wqu@suse.com>             2023-11-17 06:54:14 +0300
committer   David Sterba <dsterba@suse.com>      2023-12-15 22:27:01 +0300
commit      cfbf07e2787e4da79c63622f1a6e64cc89f3a829 (patch)
tree        b69d375e861865f59dc17844e4c445f40d89ab35 /fs/btrfs/extent_io.c
parent      4cea422a776558ccf84e918205d0c162a516502c (diff)
download    linux-cfbf07e2787e4da79c63622f1a6e64cc89f3a829.tar.xz
btrfs: migrate to use folio private instead of page private
As a cleanup and a preparation for the future folio migration, this
patch replaces all uses of page->private with their folio versions.
This includes:
- PagePrivate()         -> folio_test_private()
- page->private         -> folio_get_private()
- attach_page_private() -> folio_attach_private()
- detach_page_private() -> folio_detach_private()
While at it, also remove the forced casts on page->private:
folio_get_private() already returns a (void *), so the casts are no
longer needed.
For now, even if we missed some call sites, it won't cause any problems
yet, as we're only using order 0 folios (a single page), so all the
folio and page flags are kept in sync.
But for the future conversion to higher order folios, the page <-> folio
flag synchronization is no longer guaranteed, so we have to migrate to
the folio flags now.
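To make the mapping concrete, the before/after pattern looks roughly like
this (an illustrative sketch, not code from the patch; demo_attach_old(),
demo_attach_new() and the data pointer are made up for the example):

static void demo_attach_old(struct page *page, void *data)
{
        /* Old style: test and attach private data through struct page. */
        if (!PagePrivate(page))
                attach_page_private(page, data);
        else
                WARN_ON(page->private != (unsigned long)data);
}

static void demo_attach_new(struct page *page, void *data)
{
        /* New style: convert to the folio once, then use folio helpers. */
        struct folio *folio = page_folio(page);

        if (!folio_test_private(folio))
                folio_attach_private(folio, data);
        else
                WARN_ON(folio_get_private(folio) != data);
}

Note that page->private is an unsigned long while folio_get_private()
returns a (void *) directly; that difference is what lets the
(struct btrfs_subpage *) and (struct extent_buffer *) casts below be
dropped.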
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--   fs/btrfs/extent_io.c   102
1 file changed, 55 insertions(+), 47 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8f724c54fc8e..d68626d1c286 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -562,11 +562,13 @@ update:
 
 static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
 {
+        struct folio *folio = page_folio(page);
+
         ASSERT(PageLocked(page));
         if (!btrfs_is_subpage(fs_info, page))
                 return;
 
-        ASSERT(PagePrivate(page));
+        ASSERT(folio_test_private(folio));
         btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE);
 }
 
@@ -865,6 +867,7 @@ static int attach_extent_buffer_page(struct extent_buffer *eb,
                                      struct page *page,
                                      struct btrfs_subpage *prealloc)
 {
+        struct folio *folio = page_folio(page);
         struct btrfs_fs_info *fs_info = eb->fs_info;
         int ret = 0;
 
@@ -878,22 +881,22 @@ static int attach_extent_buffer_page(struct extent_buffer *eb,
         lockdep_assert_held(&page->mapping->private_lock);
 
         if (fs_info->nodesize >= PAGE_SIZE) {
-                if (!PagePrivate(page))
-                        attach_page_private(page, eb);
+                if (!folio_test_private(folio))
+                        folio_attach_private(folio, eb);
                 else
-                        WARN_ON(page->private != (unsigned long)eb);
+                        WARN_ON(folio_get_private(folio) != eb);
                 return 0;
         }
 
         /* Already mapped, just free prealloc */
-        if (PagePrivate(page)) {
+        if (folio_test_private(folio)) {
                 btrfs_free_subpage(prealloc);
                 return 0;
         }
 
         if (prealloc)
                 /* Has preallocated memory for subpage */
-                attach_page_private(page, prealloc);
+                folio_attach_private(folio, prealloc);
         else
                 /* Do new allocation to attach subpage */
                 ret = btrfs_attach_subpage(fs_info, page,
@@ -903,11 +906,12 @@ static int attach_extent_buffer_page(struct extent_buffer *eb,
 
 int set_page_extent_mapped(struct page *page)
 {
+        struct folio *folio = page_folio(page);
         struct btrfs_fs_info *fs_info;
 
         ASSERT(page->mapping);
 
-        if (PagePrivate(page))
+        if (folio_test_private(folio))
                 return 0;
 
         fs_info = btrfs_sb(page->mapping->host->i_sb);
@@ -915,24 +919,25 @@ int set_page_extent_mapped(struct page *page)
         if (btrfs_is_subpage(fs_info, page))
                 return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
 
-        attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
+        folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
         return 0;
 }
 
 void clear_page_extent_mapped(struct page *page)
 {
+        struct folio *folio = page_folio(page);
         struct btrfs_fs_info *fs_info;
 
         ASSERT(page->mapping);
 
-        if (!PagePrivate(page))
+        if (!folio_test_private(folio))
                 return;
 
         fs_info = btrfs_sb(page->mapping->host->i_sb);
         if (btrfs_is_subpage(fs_info, page))
                 return btrfs_detach_subpage(fs_info, page);
 
-        detach_page_private(page);
+        folio_detach_private(folio);
 }
 
 static struct extent_map *
@@ -1240,7 +1245,8 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
 static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
                                  struct page *page, u64 *start, u64 *end)
 {
-        struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+        struct folio *folio = page_folio(page);
+        struct btrfs_subpage *subpage = folio_get_private(folio);
         struct btrfs_subpage_info *spi = fs_info->subpage_info;
         u64 orig_start = *start;
         /* Declare as unsigned long so we can use bitmap ops */
@@ -1725,6 +1731,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
 static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
 {
         struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
+        struct folio *folio = page_folio(page);
         int submitted = 0;
         u64 page_start = page_offset(page);
         int bit_start = 0;
@@ -1732,7 +1739,7 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
 
         /* Lock and write each dirty extent buffers in the range */
         while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
-                struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+                struct btrfs_subpage *subpage = folio_get_private(folio);
                 struct extent_buffer *eb;
                 unsigned long flags;
                 u64 start;
@@ -1742,7 +1749,7 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
                  * in the meantime.
                  */
                 spin_lock(&page->mapping->private_lock);
-                if (!PagePrivate(page)) {
+                if (!folio_test_private(folio)) {
                         spin_unlock(&page->mapping->private_lock);
                         break;
                 }
@@ -1807,22 +1814,23 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
 {
         struct writeback_control *wbc = ctx->wbc;
         struct address_space *mapping = page->mapping;
+        struct folio *folio = page_folio(page);
         struct extent_buffer *eb;
         int ret;
 
-        if (!PagePrivate(page))
+        if (!folio_test_private(folio))
                 return 0;
 
         if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
                 return submit_eb_subpage(page, wbc);
 
         spin_lock(&mapping->private_lock);
-        if (!PagePrivate(page)) {
+        if (!folio_test_private(folio)) {
                 spin_unlock(&mapping->private_lock);
                 return 0;
         }
 
-        eb = (struct extent_buffer *)page->private;
+        eb = folio_get_private(folio);
 
         /*
          * Shouldn't happen and normally this would be a BUG_ON but no point
@@ -3060,12 +3068,13 @@ static int extent_buffer_under_io(const struct extent_buffer *eb)
 
 static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page)
 {
+        struct folio *folio = page_folio(page);
         struct btrfs_subpage *subpage;
 
         lockdep_assert_held(&page->mapping->private_lock);
 
-        if (PagePrivate(page)) {
-                subpage = (struct btrfs_subpage *)page->private;
+        if (folio_test_private(folio)) {
+                subpage = folio_get_private(folio);
                 if (atomic_read(&subpage->eb_refs))
                         return true;
                 /*
@@ -3082,15 +3091,16 @@ static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *pag
 {
         struct btrfs_fs_info *fs_info = eb->fs_info;
         const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
+        struct folio *folio = page_folio(page);
 
         /*
-         * For mapped eb, we're going to change the page private, which should
+         * For mapped eb, we're going to change the folio private, which should
          * be done under the private_lock.
          */
         if (mapped)
                 spin_lock(&page->mapping->private_lock);
 
-        if (!PagePrivate(page)) {
+        if (!folio_test_private(folio)) {
                 if (mapped)
                         spin_unlock(&page->mapping->private_lock);
                 return;
@@ -3101,19 +3111,15 @@ static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *pag
                  * We do this since we'll remove the pages after we've
                  * removed the eb from the radix tree, so we could race
                  * and have this page now attached to the new eb.  So
-                 * only clear page_private if it's still connected to
+                 * only clear folio if it's still connected to
                  * this eb.
                  */
-                if (PagePrivate(page) &&
-                    page->private == (unsigned long)eb) {
+                if (folio_test_private(folio) && folio_get_private(folio) == eb) {
                         BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
                         BUG_ON(PageDirty(page));
                         BUG_ON(PageWriteback(page));
-                        /*
-                         * We need to make sure we haven't be attached
-                         * to a new eb.
-                         */
-                        detach_page_private(page);
+                        /* We need to make sure we haven't be attached to a new eb. */
+                        folio_detach_private(folio);
                 }
                 if (mapped)
                         spin_unlock(&page->mapping->private_lock);
@@ -3121,9 +3127,9 @@ static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *pag
         }
 
         /*
-         * For subpage, we can have dummy eb with page private.  In this case,
-         * we can directly detach the private as such page is only attached to
-         * one dummy eb, no sharing.
+         * For subpage, we can have dummy eb with folio private attached.  In
+         * this case, we can directly detach the private as such folio is only
+         * attached to one dummy eb, no sharing.
          */
         if (!mapped) {
                 btrfs_detach_subpage(fs_info, page);
@@ -3133,7 +3139,7 @@ static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *pag
         btrfs_page_dec_eb_refs(fs_info, page);
 
         /*
-         * We can only detach the page private if there are no other ebs in the
+         * We can only detach the folio private if there are no other ebs in the
          * page range and no unfinished IO.
          */
         if (!page_range_has_eb(fs_info, page))
@@ -3410,6 +3416,7 @@ free_eb:
 static struct extent_buffer *grab_extent_buffer(
                 struct btrfs_fs_info *fs_info, struct page *page)
 {
+        struct folio *folio = page_folio(page);
         struct extent_buffer *exists;
 
         /*
@@ -3421,21 +3428,21 @@ static struct extent_buffer *grab_extent_buffer(
                 return NULL;
 
         /* Page not yet attached to an extent buffer */
-        if (!PagePrivate(page))
+        if (!folio_test_private(folio))
                 return NULL;
 
         /*
          * We could have already allocated an eb for this page and attached one
          * so lets see if we can get a ref on the existing eb, and if we can we
          * know it's good and we can just return that one, else we know we can
-         * just overwrite page->private.
+         * just overwrite folio private.
          */
-        exists = (struct extent_buffer *)page->private;
+        exists = folio_get_private(folio);
         if (atomic_inc_not_zero(&exists->refs))
                 return exists;
 
         WARN_ON(PageDirty(page));
-        detach_page_private(page);
+        folio_detach_private(folio);
         return NULL;
 }
 
@@ -3519,7 +3526,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
         num_pages = num_extent_pages(eb);
 
         /*
-         * Preallocate page->private for subpage case, so that we won't
+         * Preallocate folio private for subpage case, so that we won't
          * allocate memory with private_lock nor page lock hold.
          *
         * The memory will be freed by attach_extent_buffer_page() or freed
@@ -3556,7 +3563,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
                 ASSERT(!ret);
                 /*
                  * To inform we have extra eb under allocation, so that
-                 * detach_extent_buffer_page() won't release the page private
+                 * detach_extent_buffer_page() won't release the folio private
                  * when the eb hasn't yet been inserted into radix tree.
                  *
                  * The ref will be decreased when the eb released the page, in
@@ -4520,7 +4527,7 @@ static int try_release_subpage_extent_buffer(struct page *page)
         struct extent_buffer *eb = NULL;
 
         /*
-         * Unlike try_release_extent_buffer() which uses page->private
+         * Unlike try_release_extent_buffer() which uses folio private
          * to grab buffer, for subpage case we rely on radix tree, thus
          * we need to ensure radix tree consistency.
         *
@@ -4560,17 +4567,17 @@ static int try_release_subpage_extent_buffer(struct page *page)
                 /*
                  * Here we don't care about the return value, we will always
-                 * check the page private at the end.  And
+                 * check the folio private at the end.  And
                  * release_extent_buffer() will release the refs_lock.
                  */
                 release_extent_buffer(eb);
         }
 
         /*
-         * Finally to check if we have cleared page private, as if we have
-         * released all ebs in the page, the page private should be cleared now.
+         * Finally to check if we have cleared folio private, as if we have
+         * released all ebs in the page, the folio private should be cleared now.
          */
         spin_lock(&page->mapping->private_lock);
-        if (!PagePrivate(page))
+        if (!folio_test_private(page_folio(page)))
                 ret = 1;
         else
                 ret = 0;
@@ -4581,22 +4588,23 @@ static int try_release_subpage_extent_buffer(struct page *page)
 
 int try_release_extent_buffer(struct page *page)
 {
+        struct folio *folio = page_folio(page);
         struct extent_buffer *eb;
 
         if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
                 return try_release_subpage_extent_buffer(page);
 
         /*
-         * We need to make sure nobody is changing page->private, as we rely on
-         * page->private as the pointer to extent buffer.
+         * We need to make sure nobody is changing folio private, as we rely on
+         * folio private as the pointer to extent buffer.
          */
         spin_lock(&page->mapping->private_lock);
-        if (!PagePrivate(page)) {
+        if (!folio_test_private(folio)) {
                 spin_unlock(&page->mapping->private_lock);
                 return 1;
         }
 
-        eb = (struct extent_buffer *)page->private;
+        eb = folio_get_private(folio);
         BUG_ON(!eb);
 
         /*
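One detail the conversion keeps intact is the locking rule around the
private pointer: it may be detached concurrently, so it is only
dereferenced after re-checking folio_test_private() under
mapping->private_lock, exactly as try_release_extent_buffer() does above.
A minimal sketch of that idiom (demo_try_release() and struct demo_buf
are hypothetical stand-ins for the btrfs types):

struct demo_buf {
        atomic_t refs;
};

/* Return 1 if the folio's private data is (or already was) detached. */
static int demo_try_release(struct page *page)
{
        struct folio *folio = page_folio(page);
        struct demo_buf *buf;

        /* Cheap unlocked check first: nothing attached, nothing to do. */
        if (!folio_test_private(folio))
                return 1;

        spin_lock(&page->mapping->private_lock);
        /* Re-check under the lock; it may have been detached meanwhile. */
        if (!folio_test_private(folio)) {
                spin_unlock(&page->mapping->private_lock);
                return 1;
        }
        buf = folio_get_private(folio);

        if (atomic_read(&buf->refs) > 1) {
                /* Still referenced elsewhere, keep the private attached. */
                spin_unlock(&page->mapping->private_lock);
                return 0;
        }
        folio_detach_private(folio);
        spin_unlock(&page->mapping->private_lock);
        return 1;
}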