author     Vlastimil Babka <vbabka@suse.cz>  2015-09-09 01:02:46 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-09-09 01:35:28 +0300
commit     29c0dde830f8c08ceacf2d3edf6dc8ddd9a9c3c4 (patch)
tree       840bad954f0509229796e0bc82060f5bca0e4e63 /mm/compaction.c
parent     02333641e2cf4ac9f23eeeb01183ed8318d346ca (diff)
download   linux-29c0dde830f8c08ceacf2d3edf6dc8ddd9a9c3c4.tar.xz
mm, compaction: always skip all compound pages by order in migrate scanner
The compaction migrate scanner tries to skip THP pages by their order, to reduce the number of iterations spent on pages it cannot isolate. The check is only done if PageLRU() is true, which means it applies to THP pages but not to e.g. hugetlbfs pages or any other non-LRU compound pages, which we still have to iterate by base pages.

This limitation comes from the assumption that it is only safe to read compound_order() when we hold the zone's lru_lock and THP cannot be split under us. But once order values that are not below MAX_ORDER are filtered out (to prevent overflows), the only danger of reading a bogus compound_order() due to a rare race is that we skip too much or too little. This is the same reasoning that patch 99c0fd5e51c4 ("mm, compaction: skip buddy pages by their order in the migrate scanner") introduced for unsafely reading the PageBuddy() order.

After this patch, all pages are tested for PageCompound() and skipped by compound_order(). The test is done after the test for balloon_page_movable(), as we don't want to assume whether balloon pages (or other pages with their own isolation and migration implementation, if a generic API gets implemented) are compound or not.

When tested with stress-highalloc from mmtests on a 4GB system with 1GB hugetlbfs pages, the vmstat compact_migrate_scanned count decreased by 15%.

[kirill.shutemov@linux.intel.com: change PageTransHuge checks to PageCompound for different series was squashed here]
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Acked-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
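As an illustration of the skip-by-order pattern the patch relies on, here is a minimal user-space sketch, not kernel code: the order is read racily, values not below MAX_ORDER are treated as bogus and ignored, and the scanner advances low_pfn by one less than the compound page size because the for loop's own low_pfn++ supplies the final step. The MAX_ORDER value and the read_order_racy() helper are illustrative assumptions, not the kernel API.

#include <stdio.h>

#define MAX_ORDER 11	/* illustrative default; orders at or above this are treated as bogus */

/* Hypothetical stand-in for a racy compound_order() read: pretend every
 * 512th pfn starts an order-9 compound page, everything else is order 0. */
static unsigned int read_order_racy(unsigned long pfn)
{
	return (pfn % 512 == 0) ? 9 : 0;
}

int main(void)
{
	unsigned long low_pfn, end_pfn = 4096, iterations = 0;

	for (low_pfn = 0; low_pfn < end_pfn; low_pfn++) {
		unsigned int comp_order = read_order_racy(low_pfn);

		iterations++;
		if (comp_order > 0) {
			/* Only trust orders below MAX_ORDER; the unlocked read may race. */
			if (comp_order < MAX_ORDER)
				low_pfn += (1UL << comp_order) - 1;	/* loop's ++ supplies the last step */
			continue;
		}
		/* an order-0 page would be considered for isolation here */
	}

	printf("visited %lu of %lu pfns\n", iterations, end_pfn);
	return 0;
}

For an order-9 compound page (512 base pages), one loop iteration replaces 512, which is the kind of saving behind the reported drop in compact_migrate_scanned.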
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--  mm/compaction.c  47
1 file changed, 27 insertions, 20 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 0dce7e87d771..1ccb015ab1eb 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -680,6 +680,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
/* Time to isolate some pages for migration */
for (; low_pfn < end_pfn; low_pfn++) {
+ bool is_lru;
+
/*
* Periodically drop the lock (if held) regardless of its
* contention, to give chance to IRQs. Abort async compaction
@@ -723,36 +725,35 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
* It's possible to migrate LRU pages and balloon pages
* Skip any other type of page
*/
- if (!PageLRU(page)) {
+ is_lru = PageLRU(page);
+ if (!is_lru) {
if (unlikely(balloon_page_movable(page))) {
if (balloon_page_isolate(page)) {
/* Successfully isolated */
goto isolate_success;
}
}
- continue;
}
/*
- * PageLRU is set. lru_lock normally excludes isolation
- * splitting and collapsing (collapsing has already happened
- * if PageLRU is set) but the lock is not necessarily taken
- * here and it is wasteful to take it just to check transhuge.
- * Check TransHuge without lock and skip the whole pageblock if
- * it's either a transhuge or hugetlbfs page, as calling
- * compound_order() without preventing THP from splitting the
- * page underneath us may return surprising results.
+ * Regardless of being on LRU, compound pages such as THP and
+ * hugetlbfs are not to be compacted. We can potentially save
+ * a lot of iterations if we skip them at once. The check is
+ * racy, but we can consider only valid values and the only
+ * danger is skipping too much.
*/
- if (PageTransHuge(page)) {
- if (!locked)
- low_pfn = ALIGN(low_pfn + 1,
- pageblock_nr_pages) - 1;
- else
- low_pfn += (1 << compound_order(page)) - 1;
+ if (PageCompound(page)) {
+ unsigned int comp_order = compound_order(page);
+
+ if (likely(comp_order < MAX_ORDER))
+ low_pfn += (1UL << comp_order) - 1;
continue;
}
+ if (!is_lru)
+ continue;
+
/*
* Migration will fail if an anonymous page is pinned in memory,
* so avoid taking lru_lock and isolating it unnecessarily in an
@@ -769,11 +770,17 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
if (!locked)
break;
- /* Recheck PageLRU and PageTransHuge under lock */
+ /* Recheck PageLRU and PageCompound under lock */
if (!PageLRU(page))
continue;
- if (PageTransHuge(page)) {
- low_pfn += (1 << compound_order(page)) - 1;
+
+ /*
+ * Page became compound since the non-locked check,
+ * and it's on LRU. It can only be a THP so the order
+ * is safe to read and it's 0 for tail pages.
+ */
+ if (unlikely(PageCompound(page))) {
+ low_pfn += (1UL << compound_order(page)) - 1;
continue;
}
}
@@ -784,7 +791,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
if (__isolate_lru_page(page, isolate_mode) != 0)
continue;
- VM_BUG_ON_PAGE(PageTransCompound(page), page);
+ VM_BUG_ON_PAGE(PageCompound(page), page);
/* Successfully isolated */
del_page_from_lru_list(page, lruvec, page_lru(page));