author		Matthew Wilcox (Oracle) <willy@infradead.org>	2023-08-16 18:11:55 +0300
committer	Andrew Morton <akpm@linux-foundation.org>	2023-08-22 00:28:44 +0300
commit		9c5ccf2db04b8d7c3df363fdd4856c2b79ab2c6a (patch)
tree		2cf6a2dffee40003d9452b72e660bb64635f0f3b /mm
parent		0f2f43fabb95192c73b19586ef7536d7ac7c2f8c (diff)
mm: remove HUGETLB_PAGE_DTOR
We can use a bit in page[1].flags to indicate that this folio belongs to
hugetlb instead of using a value in page[1].dtors.  That lets
folio_test_hugetlb() become an inline function like it should be.  We can
also get rid of NULL_COMPOUND_DTOR.

Link: https://lkml.kernel.org/r/20230816151201.3655946-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Yanteng Si <siyanteng@loongson.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
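Note: the include/linux/page-flags.h half of this series is outside this
mm-only diffstat.  As a sketch only, assuming a PG_hugetlb bit kept in
page[1].flags per the commit message, the inline folio_test_hugetlb() it
refers to could look like:

	/*
	 * Sketch only -- the real definition lives in the companion
	 * page-flags.h change, not shown in this mm-only diff.  Test the
	 * assumed PG_hugetlb bit in the second page's flags; a single
	 * page can never be hugetlb, so check folio_test_large() first.
	 */
	static inline bool folio_test_hugetlb(struct folio *folio)
	{
		return folio_test_large(folio) &&
		       test_bit(PG_hugetlb, folio_flags(folio, 1));
	}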
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c	49
-rw-r--r--	mm/page_alloc.c	 2
2 files changed, 7 insertions, 44 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6a3c80026ab3..a82c3104337e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1585,25 +1585,7 @@ static inline void __clear_hugetlb_destructor(struct hstate *h,
					       struct folio *folio)
{
lockdep_assert_held(&hugetlb_lock);
- /*
- * Very subtle
- *
- * For non-gigantic pages set the destructor to the normal compound
- * page dtor. This is needed in case someone takes an additional
- * temporary ref to the page, and freeing is delayed until they drop
- * their reference.
- *
- * For gigantic pages set the destructor to the null dtor. This
- * destructor will never be called. Before freeing the gigantic
- * page destroy_compound_gigantic_folio will turn the folio into a
- * simple group of pages. After this the destructor does not
- * apply.
- *
- */
- if (hstate_is_gigantic(h))
- folio_set_compound_dtor(folio, NULL_COMPOUND_DTOR);
- else
- folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR);
+ folio_clear_hugetlb(folio);
}
/*
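The folio_set_hugetlb()/folio_clear_hugetlb() helpers used in the hunks
above and below come from the same header-side change; a sketch of their
likely shape, under the same PG_hugetlb assumption:

	/* Sketch, same assumption: set/clear the bit in page[1].flags. */
	static inline void folio_set_hugetlb(struct folio *folio)
	{
		set_bit(PG_hugetlb, folio_flags(folio, 1));
	}

	static inline void folio_clear_hugetlb(struct folio *folio)
	{
		clear_bit(PG_hugetlb, folio_flags(folio, 1));
	}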
@@ -1690,7 +1672,7 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
h->surplus_huge_pages_node[nid]++;
}
- folio_set_compound_dtor(folio, HUGETLB_PAGE_DTOR);
+ folio_set_hugetlb(folio);
folio_change_private(folio, NULL);
/*
* We have to set hugetlb_vmemmap_optimized again as above
@@ -1814,9 +1796,8 @@ static void free_hpage_workfn(struct work_struct *work)
/*
* The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
* folio_hstate() is going to trigger because a previous call to
- * remove_hugetlb_folio() will call folio_set_compound_dtor
- * (folio, NULL_COMPOUND_DTOR), so do not use folio_hstate()
- * directly.
+ * remove_hugetlb_folio() will clear the hugetlb bit, so do
+ * not use folio_hstate() directly.
*/
h = size_to_hstate(page_size(page));
@@ -1955,7 +1936,7 @@ static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
{
hugetlb_vmemmap_optimize(h, &folio->page);
INIT_LIST_HEAD(&folio->lru);
- folio_set_compound_dtor(folio, HUGETLB_PAGE_DTOR);
+ folio_set_hugetlb(folio);
hugetlb_set_folio_subpool(folio, NULL);
set_hugetlb_cgroup(folio, NULL);
set_hugetlb_cgroup_rsvd(folio, NULL);
@@ -2070,28 +2051,10 @@ int PageHuge(struct page *page)
if (!PageCompound(page))
return 0;
folio = page_folio(page);
- return folio->_folio_dtor == HUGETLB_PAGE_DTOR;
+ return folio_test_hugetlb(folio);
}
EXPORT_SYMBOL_GPL(PageHuge);
-/**
- * folio_test_hugetlb - Determine if the folio belongs to hugetlbfs
- * @folio: The folio to test.
- *
- * Context: Any context. Caller should have a reference on the folio to
- * prevent it from being turned into a tail page.
- * Return: True for hugetlbfs folios, false for anon folios or folios
- * belonging to other filesystems.
- */
-bool folio_test_hugetlb(struct folio *folio)
-{
- if (!folio_test_large(folio))
- return false;
-
- return folio->_folio_dtor == HUGETLB_PAGE_DTOR;
-}
-EXPORT_SYMBOL_GPL(folio_test_hugetlb);
-
/*
* Find and lock address space (mapping) in write mode.
*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 31fec31be31e..d96dc6a3077a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1112,7 +1112,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
if (compound)
- ClearPageHasHWPoisoned(page);
+ page[1].flags &= ~PAGE_FLAGS_SECOND;
for (i = 1; i < (1 << order); i++) {
if (compound)
bad += free_tail_page_prepare(page, page + i);
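PAGE_FLAGS_SECOND is likewise defined on the header side of this change.
A sketch, assuming it masks every flag stored in page[1].flags so that
free_pages_prepare() can drop them all in one store rather than clearing
PG_has_hwpoisoned and the new PG_hugetlb individually:

	/*
	 * Sketch, assumption: all flags that live in the second page of
	 * a compound allocation, cleared together at free time.
	 */
	#define PAGE_FLAGS_SECOND \
		(1UL << PG_has_hwpoisoned | 1UL << PG_hugetlb)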