author		Matthew Wilcox (Oracle) <willy@infradead.org>	2024-03-21 17:24:41 +0300
committer	Andrew Morton <akpm@linux-foundation.org>	2024-04-26 06:56:00 +0300
commit		85edc15a4c606094a14c36ebf5bceea7f9a3e395 (patch)
tree		65f19a4cb96addd457d022b292b0b0abf0cb8c88
parent		b7b098cf00a2b65d5654a86dc8edf82f125289c1 (diff)
download	linux-85edc15a4c606094a14c36ebf5bceea7f9a3e395.tar.xz
mm: remove folio_prep_large_rmappable()
Now that prep_compound_page() initialises folio->_deferred_list,
folio_prep_large_rmappable()'s only purpose is to set the large_rmappable
flag, so inline it into the two callers.  Take the opportunity to convert
the large_rmappable definition from PAGEFLAG to FOLIO_FLAG and remove
PageTestLargeRmappable and friends.

Link: https://lkml.kernel.org/r/20240321142448.1645400-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--	include/linux/huge_mm.h		3
-rw-r--r--	include/linux/page-flags.h	4
-rw-r--r--	mm/huge_memory.c		9
-rw-r--r--	mm/internal.h			3

4 files changed, 5 insertions, 14 deletions
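Condensed view of the change, drawn from the hunks below: the deleted
helper had reduced to a NULL/large check around a single flag set, so the
two callers now open-code exactly the part each one needs.

	/* Removed helper (mm/huge_memory.c): by this point it only set a flag. */
	void folio_prep_large_rmappable(struct folio *folio)
	{
		if (!folio || !folio_test_large(folio))
			return;
		folio_set_large_rmappable(folio);
	}

	/* Caller 1, __split_huge_page_tail(): only reached when new_order > 0,
	 * i.e. new_folio is known to be large, so the check is dropped. */
	folio_set_large_rmappable(new_folio);

	/* Caller 2, page_rmappable_folio(): keeps the NULL/large check inline. */
	if (folio && folio_test_large(folio))
		folio_set_large_rmappable(folio);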
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index de0c89105076..0e16451adaba 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -263,7 +263,6 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags);
 
-void folio_prep_large_rmappable(struct folio *folio);
 bool can_split_folio(struct folio *folio, int *pextra_pins);
 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		unsigned int new_order);
@@ -411,8 +410,6 @@ static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
 	return 0;
 }
 
-static inline void folio_prep_large_rmappable(struct folio *folio) {}
-
 #define transparent_hugepage_flags 0UL
 
 #define thp_get_unmapped_area	NULL
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 4bf1c25fd1dc..6fb3cd42ee59 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -868,9 +868,9 @@ static inline void ClearPageCompound(struct page *page)
BUG_ON(!PageHead(page));
ClearPageHead(page);
}
-PAGEFLAG(LargeRmappable, large_rmappable, PF_SECOND)
+FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
#else
-TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable)
+FOLIO_FLAG_FALSE(large_rmappable)
#endif
#define PG_head_mask ((1UL << PG_head))
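For context: the old PAGEFLAG(LargeRmappable, ...) macro emitted both
page- and folio-based accessors, while FOLIO_FLAG emits only the folio
ones, which is what lets the Page* variants disappear.  A simplified
sketch of what FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE) roughly
expands to — approximate only, since the real macros in
include/linux/page-flags.h add const-correctness and debug checks:

	/* Test/set/clear the flag in the folio's second page's flags word. */
	static __always_inline bool folio_test_large_rmappable(struct folio *folio)
	{
		return test_bit(PG_large_rmappable, folio_flags(folio, FOLIO_SECOND_PAGE));
	}

	static __always_inline void folio_set_large_rmappable(struct folio *folio)
	{
		set_bit(PG_large_rmappable, folio_flags(folio, FOLIO_SECOND_PAGE));
	}

	static __always_inline void folio_clear_large_rmappable(struct folio *folio)
	{
		clear_bit(PG_large_rmappable, folio_flags(folio, FOLIO_SECOND_PAGE));
	}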
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4cc7133aaa4b..16b2c5622fb1 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -789,13 +789,6 @@ struct deferred_split *get_deferred_split_queue(struct folio *folio)
 }
 #endif
 
-void folio_prep_large_rmappable(struct folio *folio)
-{
-	if (!folio || !folio_test_large(folio))
-		return;
-	folio_set_large_rmappable(folio);
-}
-
 static inline bool is_transparent_hugepage(struct folio *folio)
 {
 	if (!folio_test_large(folio))
@@ -2862,7 +2855,7 @@ static void __split_huge_page_tail(struct folio *folio, int tail,
 	clear_compound_head(page_tail);
 	if (new_order) {
 		prep_compound_page(page_tail, new_order);
-		folio_prep_large_rmappable(new_folio);
+		folio_set_large_rmappable(new_folio);
 	}
 
 	/* Finally unfreeze refcount. Additional reference from page cache. */
diff --git a/mm/internal.h b/mm/internal.h
index 5c0c57c9cd19..ab8250d8a591 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -513,7 +513,8 @@ static inline struct folio *page_rmappable_folio(struct page *page)
 {
 	struct folio *folio = (struct folio *)page;
 
-	folio_prep_large_rmappable(folio);
+	if (folio && folio_test_large(folio))
+		folio_set_large_rmappable(folio);
 	return folio;
 }
 
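Usage sketch for page_rmappable_folio(): it wraps a freshly allocated
page as a folio and, if the allocation is a large folio, marks it
rmappable.  The snippet below is hypothetical — real callers sit in the
page allocation paths, and gfp/order here are stand-in variables:

	/* Allocate a compound page and hand it out as an rmappable folio. */
	struct page *page = alloc_pages(gfp | __GFP_COMP, order);

	/* NULL and order-0 pages pass through untouched; large folios get
	 * large_rmappable set before anyone can map them. */
	struct folio *folio = page_rmappable_folio(page);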