author    David Rientjes <rientjes@google.com>  2017-11-18 02:26:30 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-11-18 03:10:00 +0300
commit    21dc7e023611fbcf8e38f255731bcf3cc38e7638 (patch)
tree      6487d3f4f8887428a17f189610dda040d8bf7afd /mm/compaction.c
parent    a0647dc9208fae9124ca38d43a5c3c950d955291 (diff)
download  linux-21dc7e023611fbcf8e38f255731bcf3cc38e7638.tar.xz
mm, compaction: persistently skip hugetlbfs pageblocks
It is pointless to migrate hugetlb memory as part of memory compaction if
the hugetlb size is equal to the pageblock order.  No defragmentation is
occurring in this condition.

It is also pointless for the freeing scanner to scan a pageblock where a
hugetlb page is pinned.  Unconditionally skip these pageblocks, and do so
persistently so that they are not rescanned until it is observed that
these hugepages are no longer pinned.

It would also be possible to do this by involving the hugetlb subsystem
in marking pageblocks to no longer be skipped when hugetlb pages are
freed.  This is a simple solution that doesn't involve any additional
subsystems in pageblock skip manipulation.

[rientjes@google.com: fix build]
Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1708201734390.117182@chino.kir.corp.google.com
Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1708151639130.106658@chino.kir.corp.google.com
Signed-off-by: David Rientjes <rientjes@google.com>
Tested-by: Michal Hocko <mhocko@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
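[Editor's note, not part of the commit: for concreteness, on x86_64 the
default hugetlb page and a pageblock are both order-9, i.e. 512 base pages
or 2MB, so a hugetlb page covers its pageblock exactly and compaction can
reclaim nothing from it.  A minimal userspace sketch of that arithmetic,
with the usual x86_64 constants assumed purely for illustration:]

	#include <stdio.h>

	/* Usual x86_64 values, assumed here for illustration only. */
	#define PAGE_SHIFT      12	/* 4KB base pages */
	#define HPAGE_ORDER      9	/* default hugetlb page = 2^9 base pages */
	#define PAGEBLOCK_ORDER  9	/* pageblock_order == HUGETLB_PAGE_ORDER */

	int main(void)
	{
		unsigned long hpage = (1UL << HPAGE_ORDER) << PAGE_SHIFT;
		unsigned long block = (1UL << PAGEBLOCK_ORDER) << PAGE_SHIFT;

		/* Both print 2097152 (2MB): the hugetlb page fills the whole
		 * pageblock, so migrating it yields no defragmentation. */
		printf("hugetlb page: %lu bytes, pageblock: %lu bytes\n",
		       hpage, block);
		return 0;
	}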
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--  mm/compaction.c | 56
1 file changed, 44 insertions(+), 12 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index ad40d67421f3..94b5c0865dd1 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -219,6 +219,20 @@ static void reset_cached_positions(struct zone *zone)
}
/*
+ * Hugetlbfs pages should consistently be skipped until updated by the hugetlb
+ * subsystem. It is always pointless to compact pages of pageblock_order and
+ * the free scanner can reconsider when no longer huge.
+ */
+static bool pageblock_skip_persistent(struct page *page, unsigned int order)
+{
+ if (!PageHuge(page))
+ return false;
+ if (order != pageblock_order)
+ return false;
+ return true;
+}
+
+/*
* This function is called to clear all cached information on pageblocks that
* should be skipped for page isolation when the migrate and free page scanner
* meet.
@@ -242,6 +256,8 @@ static void __reset_isolation_suitable(struct zone *zone)
continue;
if (zone != page_zone(page))
continue;
+ if (pageblock_skip_persistent(page, compound_order(page)))
+ continue;
clear_pageblock_skip(page);
}
@@ -307,7 +323,13 @@ static inline bool isolation_suitable(struct compact_control *cc,
return true;
}
-static void update_pageblock_skip(struct compact_control *cc,
+static inline bool pageblock_skip_persistent(struct page *page,
+ unsigned int order)
+{
+ return false;
+}
+
+static inline void update_pageblock_skip(struct compact_control *cc,
struct page *page, unsigned long nr_isolated,
bool migrate_scanner)
{
@@ -449,13 +471,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
* and the only danger is skipping too much.
*/
if (PageCompound(page)) {
- unsigned int comp_order = compound_order(page);
-
- if (likely(comp_order < MAX_ORDER)) {
- blockpfn += (1UL << comp_order) - 1;
- cursor += (1UL << comp_order) - 1;
+ const unsigned int order = compound_order(page);
+
+ if (pageblock_skip_persistent(page, order)) {
+ set_pageblock_skip(page);
+ blockpfn = end_pfn;
+ } else if (likely(order < MAX_ORDER)) {
+ blockpfn += (1UL << order) - 1;
+ cursor += (1UL << order) - 1;
}
-
goto isolate_fail;
}
@@ -772,11 +796,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
* danger is skipping too much.
*/
if (PageCompound(page)) {
- unsigned int comp_order = compound_order(page);
-
- if (likely(comp_order < MAX_ORDER))
- low_pfn += (1UL << comp_order) - 1;
+ const unsigned int order = compound_order(page);

+ if (pageblock_skip_persistent(page, order)) {
+ set_pageblock_skip(page);
+ low_pfn = end_pfn;
+ } else if (likely(order < MAX_ORDER))
+ low_pfn += (1UL << order) - 1;
goto isolate_fail;
}
@@ -838,7 +864,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
* is safe to read and it's 0 for tail pages.
*/
if (unlikely(PageCompound(page))) {
- low_pfn += (1UL << compound_order(page)) - 1;
+ const unsigned int order = compound_order(page);
+
+ if (pageblock_skip_persistent(page, order)) {
+ set_pageblock_skip(page);
+ low_pfn = end_pfn;
+ } else
+ low_pfn += (1UL << order) - 1;
goto isolate_fail;
}
}
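
[Editor's note, not part of the commit: for context on how the skip bit
set by set_pageblock_skip() above takes effect, both scanners gate on
isolation_suitable() before scanning a pageblock.  The helper in this era
of mm/compaction.c reads roughly as below; this is a from-memory sketch of
the surrounding file, not a hunk of this patch:]

	static inline bool isolation_suitable(struct compact_control *cc,
						struct page *page)
	{
		/* Some callers (e.g. CMA) set ignore_skip_hint to scan everything. */
		if (cc->ignore_skip_hint)
			return true;

		/* Otherwise honor the per-pageblock skip bit, which this patch
		 * now leaves set across __reset_isolation_suitable() for
		 * hugetlbfs pageblocks. */
		return !get_pageblock_skip(page);
	}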