author		Matthew Wilcox (Oracle) <willy@infradead.org>	2023-08-16 18:11:52 +0300
committer	Andrew Morton <akpm@linux-foundation.org>	2023-08-22 00:28:43 +0300
commit		8dc4a8f1e038189cb575f89bcd23364698b88cc1 (patch)
tree		000cc2ba54fe80c8b41ba25dc5791f55d338a137 /mm
parent		454a00c40a21c59e99c526fe8cc57bd029cf8f0e (diff)
mm: convert free_transhuge_page() to folio_undo_large_rmappable()
Indirect calls are expensive, thanks to Spectre.  Test for
TRANSHUGE_PAGE_DTOR and destroy the folio appropriately.  Move the
free_compound_page() call into destroy_large_folio() to simplify later
patches.

Link: https://lkml.kernel.org/r/20230816151201.3655946-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Yanteng Si <siyanteng@loongson.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
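As a rough userspace sketch of the pattern this patch applies (hypothetical enum, struct, and function names throughout; not kernel code), compare dispatching through a destructor table with an explicit test for the common type:

#include <stdio.h>

enum dtor_id { PLAIN_DTOR, HUGE_DTOR, NR_DTORS };

struct obj { enum dtor_id dtor; };

static void free_plain(struct obj *o) { printf("plain free\n"); }
static void free_huge(struct obj *o)  { printf("huge free\n"); }

/* Destructor table: dispatching through it costs an indirect call,
 * which retpolines make expensive. */
static void (* const dtors[NR_DTORS])(struct obj *) = {
	[PLAIN_DTOR] = free_plain,
	[HUGE_DTOR]  = free_huge,
};

static void destroy(struct obj *o)
{
	/* Direct test first: a cheap, predictable branch replaces the
	 * indirect call for the special-cased type. */
	if (o->dtor == HUGE_DTOR) {
		free_huge(o);
		return;
	}
	/* Remaining types still go through the table. */
	dtors[o->dtor](o);
}

int main(void)
{
	struct obj a = { PLAIN_DTOR }, b = { HUGE_DTOR };
	destroy(&a);
	destroy(&b);
	return 0;
}

With Spectre mitigations enabled, the indirect call through dtors[] goes via a comparatively slow retpoline thunk, while the direct test is an ordinary predictable branch; this is the same trade destroy_large_folio() makes below for TRANSHUGE_PAGE_DTOR.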
Diffstat (limited to 'mm')
 mm/huge_memory.c | 22 +++++++++++-----------
 mm/internal.h    |  2 ++
 mm/page_alloc.c  |  9 ++++++---
 3 files changed, 19 insertions(+), 14 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 154c210892a1..b33456683b93 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2776,10 +2776,9 @@ out:
 	return ret;
 }
 
-void free_transhuge_page(struct page *page)
+void folio_undo_large_rmappable(struct folio *folio)
 {
-	struct folio *folio = (struct folio *)page;
-	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
+	struct deferred_split *ds_queue;
 	unsigned long flags;
 
 	/*
@@ -2787,15 +2786,16 @@ void free_transhuge_page(struct page *page)
 	 * deferred_list. If folio is not in deferred_list, it's safe
 	 * to check without acquiring the split_queue_lock.
 	 */
-	if (data_race(!list_empty(&folio->_deferred_list))) {
-		spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
-		if (!list_empty(&folio->_deferred_list)) {
-			ds_queue->split_queue_len--;
-			list_del(&folio->_deferred_list);
-		}
-		spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+	if (data_race(list_empty(&folio->_deferred_list)))
+		return;
+
+	ds_queue = get_deferred_split_queue(folio);
+	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+	if (!list_empty(&folio->_deferred_list)) {
+		ds_queue->split_queue_len--;
+		list_del(&folio->_deferred_list);
 	}
-	free_compound_page(page);
+	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 }
 
 void deferred_split_folio(struct folio *folio)
diff --git a/mm/internal.h b/mm/internal.h
index d99ffb473f90..30bbfcacc909 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -413,6 +413,8 @@ static inline void folio_set_order(struct folio *folio, unsigned int order)
 #endif
 }
 
+void folio_undo_large_rmappable(struct folio *folio);
+
 static inline void prep_compound_head(struct page *page, unsigned int order)
 {
 	struct folio *folio = (struct folio *)page;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 30dc444436cc..4047b5897443 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -287,9 +287,6 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
 static compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
 	[NULL_COMPOUND_DTOR] = NULL,
 	[COMPOUND_PAGE_DTOR] = free_compound_page,
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
-#endif
 };
 
 int min_free_kbytes = 1024;
@@ -614,6 +611,12 @@ void destroy_large_folio(struct folio *folio)
 		return;
 	}
 
+	if (folio_test_transhuge(folio) && dtor == TRANSHUGE_PAGE_DTOR) {
+		folio_undo_large_rmappable(folio);
+		free_compound_page(&folio->page);
+		return;
+	}
+
 	VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
 	compound_page_dtors[dtor](&folio->page);
 }
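A closing note on the locking pattern in folio_undo_large_rmappable() above: list_empty() is first tested outside the lock (wrapped in data_race(), since that unlocked read is intentionally racy) so the common case skips lock acquisition entirely, then re-tested under split_queue_lock before the list is modified. A minimal pthread-based sketch of the same check-then-lock-then-recheck idea (hypothetical names; a strict userspace equivalent would use atomics for the unlocked read):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static bool on_queue;		/* stands in for !list_empty(&folio->_deferred_list) */
static unsigned long queue_len;	/* stands in for ds_queue->split_queue_len */

static void undo_queued(void)
{
	/* Optimistic unlocked check: if the object was never queued,
	 * return without touching the lock (the common case). */
	if (!on_queue)
		return;

	pthread_mutex_lock(&queue_lock);
	/* Re-check under the lock: another thread may have dequeued
	 * the object between the unlocked test and the lock. */
	if (on_queue) {
		queue_len--;
		on_queue = false;
	}
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	on_queue = true;
	queue_len = 1;
	undo_queued();	/* slow path: takes the lock, dequeues */
	undo_queued();	/* fast path: returns without locking */
	return 0;
}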