author		Mel Gorman <mgorman@suse.de>	2012-10-09 03:32:45 +0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 11:22:50 +0400
commit		c89511ab2f8fe2b47585e60da8af7fd213ec877e (patch)
tree		c6b04cb5335957e8409edc77ca23ef012d9d326d /mm/compaction.c
parent		bb13ffeb9f6bfeb301443994dfbf29f91117dfb3 (diff)
download	linux-c89511ab2f8fe2b47585e60da8af7fd213ec877e.tar.xz
mm: compaction: Restart compaction from near where it left off
This is almost entirely based on Rik's previous patches and discussions with him about how this might be implemented.

Order > 0 compaction stops when enough free pages of the correct page order have been coalesced. When doing subsequent higher order allocations, it is possible for compaction to be invoked many times. However, the compaction code always starts out looking for things to compact at the start of the zone, and for free pages to compact things to at the end of the zone.

This can cause quadratic behaviour, with isolate_freepages starting at the end of the zone each time, even though previous invocations of the compaction code already filled up all free memory on that end of the zone. This can cause isolate_freepages to take enormous amounts of CPU with certain workloads on larger memory systems.

This patch caches where the migration and free scanner should start from on subsequent compaction invocations using the pageblock-skip information. When compaction starts it begins from the cached restart points and will update the cached restart points until a page is isolated or a pageblock is skipped that would have been scanned by synchronous compaction.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Richard Davies <richard@arachsys.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Avi Kivity <avi@redhat.com>
Acked-by: Rafael Aquini <aquini@redhat.com>
Cc: Fengguang Wu <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
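To illustrate the idea outside the kernel, the following is a minimal, standalone C sketch of the restart-point handling that the compact_zone() hunk below performs: reuse the per-zone cached scanner positions when they lie inside the zone, and fall back to the zone boundaries otherwise. The toy_zone and toy_compact_control types and the PAGEBLOCK_NR_PAGES value are illustrative stand-ins for struct zone, struct compact_control and pageblock_nr_pages, not kernel definitions.

/*
 * Standalone sketch (not kernel code) of the cached restart-point logic:
 * reuse the scanner positions cached in the zone, but fall back to the
 * zone boundaries whenever the cached values are uninitialised or stale.
 */
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL	/* illustrative pageblock size */

struct toy_zone {
	unsigned long zone_start_pfn;
	unsigned long spanned_pages;
	unsigned long compact_cached_migrate_pfn;	/* migrate scanner restart point */
	unsigned long compact_cached_free_pfn;		/* free scanner restart point */
};

struct toy_compact_control {
	unsigned long migrate_pfn;	/* scans forward from the zone start */
	unsigned long free_pfn;		/* scans backward from the zone end */
};

/* Mirrors the compact_zone() change: validate cached pfns before using them */
static void setup_scanners(struct toy_zone *zone, struct toy_compact_control *cc)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;

	cc->migrate_pfn = zone->compact_cached_migrate_pfn;
	cc->free_pfn = zone->compact_cached_free_pfn;

	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
		cc->free_pfn = end_pfn & ~(PAGEBLOCK_NR_PAGES - 1);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn = cc->migrate_pfn;
	}
}

int main(void)
{
	/* Cached values default to 0, which is outside the zone, so both scanners reset */
	struct toy_zone zone = { .zone_start_pfn = 0x10000, .spanned_pages = 0x40000 };
	struct toy_compact_control cc;

	setup_scanners(&zone, &cc);
	printf("migrate_pfn=%#lx free_pfn=%#lx\n", cc.migrate_pfn, cc.free_pfn);

	/* A later invocation restarts from wherever the previous scanners stopped */
	zone.compact_cached_migrate_pfn = 0x18000;
	zone.compact_cached_free_pfn = 0x30000;
	setup_scanners(&zone, &cc);
	printf("migrate_pfn=%#lx free_pfn=%#lx\n", cc.migrate_pfn, cc.free_pfn);
	return 0;
}

In the second call both cached values lie within the zone, so the scanners resume from where the previous invocation left off instead of rescanning the whole zone, which is what removes the quadratic behaviour described above.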
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--	mm/compaction.c	58
1 files changed, 48 insertions, 10 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index d9dbb97e607b..f94cbc0b99a5 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -80,6 +80,9 @@ static void reset_isolation_suitable(struct zone *zone)
*/
if (time_before(jiffies, zone->compact_blockskip_expire))
return;
+
+ zone->compact_cached_migrate_pfn = start_pfn;
+ zone->compact_cached_free_pfn = end_pfn;
zone->compact_blockskip_expire = jiffies + (HZ * 5);
/* Walk the zone and mark every pageblock as suitable for isolation */
@@ -103,13 +106,29 @@ static void reset_isolation_suitable(struct zone *zone)
* If no pages were isolated then mark this pageblock to be skipped in the
* future. The information is later cleared by reset_isolation_suitable().
*/
-static void update_pageblock_skip(struct page *page, unsigned long nr_isolated)
+static void update_pageblock_skip(struct compact_control *cc,
+ struct page *page, unsigned long nr_isolated,
+ bool migrate_scanner)
{
+ struct zone *zone = cc->zone;
if (!page)
return;
- if (!nr_isolated)
+ if (!nr_isolated) {
+ unsigned long pfn = page_to_pfn(page);
set_pageblock_skip(page);
+
+ /* Update where compaction should restart */
+ if (migrate_scanner) {
+ if (!cc->finished_update_migrate &&
+ pfn > zone->compact_cached_migrate_pfn)
+ zone->compact_cached_migrate_pfn = pfn;
+ } else {
+ if (!cc->finished_update_free &&
+ pfn < zone->compact_cached_free_pfn)
+ zone->compact_cached_free_pfn = pfn;
+ }
+ }
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
@@ -118,7 +137,9 @@ static inline bool isolation_suitable(struct compact_control *cc,
return true;
}
-static void update_pageblock_skip(struct page *page, unsigned long nr_isolated)
+static void update_pageblock_skip(struct compact_control *cc,
+ struct page *page, unsigned long nr_isolated,
+ bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */
@@ -327,7 +348,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
/* Update the pageblock-skip if the whole pageblock was scanned */
if (blockpfn == end_pfn)
- update_pageblock_skip(valid_page, total_isolated);
+ update_pageblock_skip(cc, valid_page, total_isolated, false);
return total_isolated;
}
@@ -533,6 +554,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
*/
if (!cc->sync && last_pageblock_nr != pageblock_nr &&
!migrate_async_suitable(get_pageblock_migratetype(page))) {
+ cc->finished_update_migrate = true;
goto next_pageblock;
}
@@ -583,6 +605,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
VM_BUG_ON(PageTransCompound(page));
/* Successfully isolated */
+ cc->finished_update_migrate = true;
del_page_from_lru_list(page, lruvec, page_lru(page));
list_add(&page->lru, migratelist);
cc->nr_migratepages++;
@@ -609,7 +632,7 @@ next_pageblock:
/* Update the pageblock-skip if the whole pageblock was scanned */
if (low_pfn == end_pfn)
- update_pageblock_skip(valid_page, nr_isolated);
+ update_pageblock_skip(cc, valid_page, nr_isolated, true);
trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
@@ -690,8 +713,10 @@ static void isolate_freepages(struct zone *zone,
* looking for free pages, the search will restart here as
* page migration may have returned some pages to the allocator
*/
- if (isolated)
+ if (isolated) {
+ cc->finished_update_free = true;
high_pfn = max(high_pfn, pfn);
+ }
}
/* split_free_page does not map the pages */
@@ -888,6 +913,8 @@ unsigned long compaction_suitable(struct zone *zone, int order)
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
int ret;
+ unsigned long start_pfn = zone->zone_start_pfn;
+ unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
ret = compaction_suitable(zone, cc->order);
switch (ret) {
@@ -900,10 +927,21 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
;
}
- /* Setup to move all movable pages to the end of the zone */
- cc->migrate_pfn = zone->zone_start_pfn;
- cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
- cc->free_pfn &= ~(pageblock_nr_pages-1);
+ /*
+ * Setup to move all movable pages to the end of the zone. Use cached
+ * information on where the scanners should start but check that it
+ * is initialised by ensuring the values are within zone boundaries.
+ */
+ cc->migrate_pfn = zone->compact_cached_migrate_pfn;
+ cc->free_pfn = zone->compact_cached_free_pfn;
+ if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
+ cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
+ zone->compact_cached_free_pfn = cc->free_pfn;
+ }
+ if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
+ cc->migrate_pfn = start_pfn;
+ zone->compact_cached_migrate_pfn = cc->migrate_pfn;
+ }
/* Clear pageblock skip if there are numerous alloc failures */
if (zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT)