author     Johannes Weiner <hannes@cmpxchg.org>        2024-03-20 21:02:07 +0300
committer  Andrew Morton <akpm@linux-foundation.org>   2024-04-26 06:56:02 +0300
commit     9cbe97bad5cd75b5b493734bd2695febb8e95281 (patch)
tree       8aecf3ebca5a64eee699068b88c8331bf0ab2081 /mm
parent     17edeb5d3f761c20fd28f6002f5a9faa53c0a0d8 (diff)
download   linux-9cbe97bad5cd75b5b493734bd2695febb8e95281.tar.xz
mm: page_alloc: optimize free_unref_folios()
Move direct freeing of isolated pages to the lock-breaking block in
the second loop.  This saves an unnecessary migratetype reassessment.

Minor comment and local variable scoping cleanups.

Link: https://lkml.kernel.org/r/20240320180429.678181-3-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: "Huang, Ying" <ying.huang@intel.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
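The effect of the change is easiest to see in the second loop of free_unref_folios(): the pageblock migratetype is now read once per folio, and isolated pageblocks are diverted to free_one_page() from the same branch that already drops the pcp lock. Below is a condensed sketch of the loop structure after the patch, abridged from the hunks further down; it is not literal kernel source, and the folio->private handling and the pcp relock/refill path are elided:

	for (i = 0; i < folios->nr; i++) {
		struct folio *folio = folios->folios[i];
		struct zone *zone = folio_zone(folio);
		unsigned long pfn = folio_pfn(folio);
		unsigned int order = (unsigned long)folio->private;
		int migratetype = get_pfnblock_migratetype(&folio->page, pfn);

		/* Zone change or isolated pageblock: leave the pcp fast path */
		if (zone != locked_zone || is_migrate_isolate(migratetype)) {
			if (pcp) {
				pcp_spin_unlock(pcp);
				pcp_trylock_finish(UP_flags);
				locked_zone = NULL;
				pcp = NULL;
			}

			/* Isolated pages bypass the pcp lists entirely */
			if (is_migrate_isolate(migratetype)) {
				free_one_page(zone, &folio->page, pfn,
					      order, migratetype, FPI_NONE);
				continue;
			}

			/* ... retake the pcp lock for the new zone ... */
		}

		/* ... add the folio to the per-cpu free list ... */
	}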
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  32
1 file changed, 23 insertions, 9 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 56a341c8b3ac..5fa3d534df2f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2493,7 +2493,7 @@ void free_unref_folios(struct folio_batch *folios)
 	unsigned long __maybe_unused UP_flags;
 	struct per_cpu_pages *pcp = NULL;
 	struct zone *locked_zone = NULL;
-	int i, j, migratetype;
+	int i, j;
 
 	/* Prepare folios for freeing */
 	for (i = 0, j = 0; i < folios->nr; i++) {
@@ -2505,14 +2505,15 @@ void free_unref_folios(struct folio_batch *folios)
 			folio_undo_large_rmappable(folio);
 		if (!free_pages_prepare(&folio->page, order))
 			continue;
-
 		/*
-		 * Free isolated folios and orders not handled on the PCP
-		 * directly to the allocator, see comment in free_unref_page.
+		 * Free orders not handled on the PCP directly to the
+		 * allocator.
 		 */
-		migratetype = get_pfnblock_migratetype(&folio->page, pfn);
-		if (!pcp_allowed_order(order) ||
-		    is_migrate_isolate(migratetype)) {
+		if (!pcp_allowed_order(order)) {
+			int migratetype;
+
+			migratetype = get_pfnblock_migratetype(&folio->page,
+							       pfn);
 			free_one_page(folio_zone(folio), &folio->page, pfn,
 				      order, migratetype, FPI_NONE);
 			continue;
@@ -2529,15 +2530,29 @@ void free_unref_folios(struct folio_batch *folios)
 		struct zone *zone = folio_zone(folio);
 		unsigned long pfn = folio_pfn(folio);
 		unsigned int order = (unsigned long)folio->private;
+		int migratetype;
 
 		folio->private = NULL;
 		migratetype = get_pfnblock_migratetype(&folio->page, pfn);
 
 		/* Different zone requires a different pcp lock */
-		if (zone != locked_zone) {
+		if (zone != locked_zone ||
+		    is_migrate_isolate(migratetype)) {
 			if (pcp) {
 				pcp_spin_unlock(pcp);
 				pcp_trylock_finish(UP_flags);
+				locked_zone = NULL;
+				pcp = NULL;
+			}
+
+			/*
+			 * Free isolated pages directly to the
+			 * allocator, see comment in free_unref_page.
+			 */
+			if (is_migrate_isolate(migratetype)) {
+				free_one_page(zone, &folio->page, pfn,
+					      order, migratetype, FPI_NONE);
+				continue;
 			}
 
 			/*
@@ -2550,7 +2565,6 @@ void free_unref_folios(struct folio_batch *folios)
 				pcp_trylock_finish(UP_flags);
 				free_one_page(zone, &folio->page, pfn,
 					      order, migratetype, FPI_NONE);
-				locked_zone = NULL;
 				continue;
 			}
 			locked_zone = zone;
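For context, free_unref_folios() is an mm-internal batch-freeing primitive (declared in mm/internal.h), not a public kernel API; its main user is the folios_put() path in mm/swap.c. The sketch below shows only the general calling pattern under that assumption: example_batch_free() is a made-up name, and the real callers additionally handle reference counting, LRU removal and hugetlb/zone-device special cases.

#include <linux/pagevec.h>
#include <linux/mm.h>
#include "internal.h"	/* free_unref_folios() */

/*
 * Hypothetical helper for illustration only: batch up folios whose
 * refcounts have already dropped to zero and free them in chunks.
 */
static void example_batch_free(struct folio **folios, unsigned int nr)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	for (i = 0; i < nr; i++) {
		/* folio_batch_add() returns the slots left; 0 means the batch is full */
		if (!folio_batch_add(&fbatch, folios[i])) {
			free_unref_folios(&fbatch);
			folio_batch_reinit(&fbatch);
		}
	}
	if (folio_batch_count(&fbatch))
		free_unref_folios(&fbatch);
}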