Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	28
1 file changed, 15 insertions, 13 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2ee4d9283738..83b5d5280e99 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2620,18 +2620,6 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
 			return false;
 	}
 
-	/*
-	 * If we have not reclaimed enough pages for compaction and the
-	 * inactive lists are large enough, continue reclaiming
-	 */
-	pages_for_compaction = compact_gap(sc->order);
-	inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
-	if (get_nr_swap_pages() > 0)
-		inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
-	if (sc->nr_reclaimed < pages_for_compaction &&
-			inactive_lru_pages > pages_for_compaction)
-		return true;
-
 	/* If compaction would go ahead or the allocation would succeed, stop */
 	for (z = 0; z <= sc->reclaim_idx; z++) {
 		struct zone *zone = &pgdat->node_zones[z];
@@ -2647,7 +2635,21 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
 			;
 		}
 	}
-	return true;
+
+	/*
+	 * If we have not reclaimed enough pages for compaction and the
+	 * inactive lists are large enough, continue reclaiming
+	 */
+	pages_for_compaction = compact_gap(sc->order);
+	inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
+	if (get_nr_swap_pages() > 0)
+		inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
+
+	return inactive_lru_pages > pages_for_compaction &&
+		/*
+		 * avoid dryrun with plenty of inactive pages
+		 */
+		nr_scanned && nr_reclaimed;
 }
 
 static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg)
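
For readers skimming the patch, below is a minimal, self-contained sketch (plain C, not kernel code) of the condition the relocated return statement encodes after this change. The parameter names mirror the variables in the diff; the wrapper function and the sample values are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative sketch only: models the decision made by the patched
 * tail of should_continue_reclaim().  Parameter names mirror the
 * variables in the diff; this wrapper is hypothetical.
 */
static bool continue_reclaim(unsigned long inactive_lru_pages,
			     unsigned long pages_for_compaction,
			     unsigned long nr_scanned,
			     unsigned long nr_reclaimed)
{
	/*
	 * Keep reclaiming only while the inactive lists still hold more
	 * pages than compaction needs, and the previous pass actually
	 * scanned and reclaimed something ("avoid dryrun").
	 */
	return inactive_lru_pages > pages_for_compaction &&
	       nr_scanned && nr_reclaimed;
}

int main(void)
{
	/* plenty of inactive pages, but nothing reclaimed last pass: stop */
	printf("%d\n", continue_reclaim(4096, 512, 32, 0));	/* prints 0 */
	/* enough inactive pages and forward progress: keep reclaiming */
	printf("%d\n", continue_reclaim(4096, 512, 32, 16));	/* prints 1 */
	return 0;
}

The effect visible in the diff is that the "enough pages for compaction" check now runs only after the per-zone loop has failed to stop reclaim, and reclaim no longer continues when the previous pass scanned or reclaimed nothing.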