From c66db8c0702c0ab741ecfd5e12b323ff49fe9089 Mon Sep 17 00:00:00 2001
From: David Hildenbrand
Date: Wed, 13 Sep 2023 14:51:09 +0200
Subject: mm/rmap: move SetPageAnonExclusive out of __page_set_anon_rmap()

Let's handle it in the caller. No need to pass the page. While at it,
rename the function to __folio_set_anon() and pass "bool exclusive"
instead of "int exclusive".

Link: https://lkml.kernel.org/r/20230913125113.313322-3-david@redhat.com
Signed-off-by: David Hildenbrand
Cc: Matthew Wilcox
Cc: Mike Kravetz
Cc: Muchun Song
Signed-off-by: Andrew Morton
---
 mm/rmap.c | 41 +++++++++++++++++++++--------------------
 1 file changed, 21 insertions(+), 20 deletions(-)

diff --git a/mm/rmap.c b/mm/rmap.c
index 61ad50b9ed9f..f76f0f23725c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1122,27 +1122,25 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
 }
 
 /**
- * __page_set_anon_rmap - set up new anonymous rmap
- * @folio:	Folio which contains page.
- * @page:	Page to add to rmap.
- * @vma:	VM area to add page to.
+ * __folio_set_anon - set up a new anonymous rmap for a folio
+ * @folio:	The folio to set up the new anonymous rmap for.
+ * @vma:	VM area to add the folio to.
  * @address:	User virtual address of the mapping
- * @exclusive:	the page is exclusively owned by the current process
+ * @exclusive:	Whether the folio is exclusive to the process.
  */
-static void __page_set_anon_rmap(struct folio *folio, struct page *page,
-	struct vm_area_struct *vma, unsigned long address, int exclusive)
+static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma,
+			     unsigned long address, bool exclusive)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 
 	BUG_ON(!anon_vma);
 
 	if (folio_test_anon(folio))
-		goto out;
+		return;
 
 	/*
-	 * If the page isn't exclusively mapped into this vma,
-	 * we must use the _oldest_ possible anon_vma for the
-	 * page mapping!
+	 * If the folio isn't exclusive to this vma, we must use the _oldest_
+	 * possible anon_vma for the folio mapping!
 	 */
 	if (!exclusive)
 		anon_vma = anon_vma->root;
@@ -1156,9 +1154,6 @@ static void __page_set_anon_rmap(struct folio *folio, struct page *page,
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
 	folio->index = linear_page_index(vma, address);
-out:
-	if (exclusive)
-		SetPageAnonExclusive(page);
 }
 
 /**
@@ -1246,11 +1241,13 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 
 	if (likely(!folio_test_ksm(folio))) {
 		if (first)
-			__page_set_anon_rmap(folio, page, vma, address,
-					     !!(flags & RMAP_EXCLUSIVE));
+			__folio_set_anon(folio, vma, address,
+					 !!(flags & RMAP_EXCLUSIVE));
 		else
 			__page_check_anon_rmap(folio, page, vma, address);
 	}
+	if (flags & RMAP_EXCLUSIVE)
+		SetPageAnonExclusive(page);
 
 	mlock_vma_folio(folio, vma, compound);
 }
@@ -1289,7 +1286,8 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 	}
 
 	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
-	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
+	__folio_set_anon(folio, vma, address, true);
+	SetPageAnonExclusive(&folio->page);
 }
 
 /**
@@ -2552,8 +2550,10 @@ void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 	VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
 	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
 	if (first)
-		__page_set_anon_rmap(folio, page, vma, address,
-				     !!(flags & RMAP_EXCLUSIVE));
+		__folio_set_anon(folio, vma, address,
+				 !!(flags & RMAP_EXCLUSIVE));
+	if (flags & RMAP_EXCLUSIVE)
+		SetPageAnonExclusive(page);
 }
 
 void hugepage_add_new_anon_rmap(struct folio *folio,
@@ -2563,6 +2563,7 @@ void hugepage_add_new_anon_rmap(struct folio *folio,
 	/* increment count (starts at -1) */
 	atomic_set(&folio->_entire_mapcount, 0);
 	folio_clear_hugetlb_restore_reserve(folio);
-	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
+	__folio_set_anon(folio, vma, address, true);
+	SetPageAnonExclusive(&folio->page);
 }
 #endif /* CONFIG_HUGETLB_PAGE */
-- 
cgit v1.2.3
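
To illustrate the calling convention this patch establishes, below is a
minimal standalone userspace sketch. It is a toy model under stated
assumptions, not kernel code: the struct layouts, the add_new_anon_rmap()
driver, and the "address >> 12" stand-in for linear_page_index() are all
simplified inventions for demonstration. It models how __folio_set_anon()
now only wires up the rmap, while the caller marks the page exclusive
itself:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures; not the real layouts. */
struct anon_vma {
	struct anon_vma *root;		/* stand-in for anon_vma->root */
};

struct folio {
	struct anon_vma *mapping;	/* stand-in for folio->mapping */
	unsigned long index;		/* stand-in for folio->index */
	bool anon_exclusive;		/* stand-in for the AnonExclusive bit */
};

/*
 * Models the patched helper: it only wires up the anon rmap and, per
 * this patch, no longer touches the exclusive bit at all.
 */
static void __folio_set_anon(struct folio *folio, struct anon_vma *anon_vma,
			     unsigned long address, bool exclusive)
{
	/* A non-exclusive mapping must use the oldest (root) anon_vma. */
	if (!exclusive)
		anon_vma = anon_vma->root;

	folio->mapping = anon_vma;
	folio->index = address >> 12;	/* stand-in for linear_page_index() */
}

/*
 * Models a caller such as folio_add_new_anon_rmap(): after this patch,
 * marking the page exclusive happens here, next to the rmap setup.
 */
static void add_new_anon_rmap(struct folio *folio, struct anon_vma *anon_vma,
			      unsigned long address)
{
	__folio_set_anon(folio, anon_vma, address, true);
	folio->anon_exclusive = true;	/* stand-in for SetPageAnonExclusive() */
}

int main(void)
{
	struct anon_vma av = { .root = &av };	/* its own root, for the demo */
	struct folio f = { 0 };

	add_new_anon_rmap(&f, &av, 0x7f0000001000UL);
	printf("index=%lu exclusive=%d\n", f.index, (int)f.anon_exclusive);
	return 0;
}

The point the model makes: because the exclusive marking now lives in the
caller, a caller like hugepage_add_anon_rmap() can set PageAnonExclusive
even when the folio is already anon and __folio_set_anon() returns early,
as the second "if (flags & RMAP_EXCLUSIVE)" hunk above shows.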