From abd6e8a7ac49807102652861f69583944752e297 Mon Sep 17 00:00:00 2001
From: Michal Hocko
Date: Wed, 22 Feb 2017 15:46:04 -0800
Subject: Revert "mm: bail out in shrink_inactive_list()"

This reverts commit 91dcade47a3d0e7.

inactive_reclaimable_pages shouldn't be needed anymore now that
get_scan_count is aware of the eligible zones ("mm, vmscan: consider
eligible zones in get_scan_count").

Link: http://lkml.kernel.org/r/20170117103702.28542-4-mhocko@kernel.org
Signed-off-by: Michal Hocko
Acked-by: Hillf Danton
Acked-by: Minchan Kim
Acked-by: Mel Gorman
Acked-by: Johannes Weiner
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/vmscan.c | 27 ---------------------------
 1 file changed, 27 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 861ec1431f25..7bb23ff229b6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1701,30 +1701,6 @@ static int current_may_throttle(void)
 		bdi_write_congested(current->backing_dev_info);
 }
 
-static bool inactive_reclaimable_pages(struct lruvec *lruvec,
-					struct scan_control *sc, enum lru_list lru)
-{
-	int zid;
-	struct zone *zone;
-	int file = is_file_lru(lru);
-	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
-
-	if (!global_reclaim(sc))
-		return true;
-
-	for (zid = sc->reclaim_idx; zid >= 0; zid--) {
-		zone = &pgdat->node_zones[zid];
-		if (!managed_zone(zone))
-			continue;
-
-		if (zone_page_state_snapshot(zone, NR_ZONE_LRU_BASE +
-				LRU_FILE * file) >= SWAP_CLUSTER_MAX)
-			return true;
-	}
-
-	return false;
-}
-
 /*
  * shrink_inactive_list() is a helper for shrink_node(). It returns the number
  * of reclaimed pages
@@ -1743,9 +1719,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 
-	if (!inactive_reclaimable_pages(lruvec, sc, lru))
-		return 0;
-
 	while (unlikely(too_many_isolated(pgdat, file, sc))) {
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
 
--
cgit v1.2.3
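
Why the bail-out is redundant after the revert: with "mm, vmscan: consider
eligible zones in get_scan_count" applied, the scan target for each LRU is
derived from a size that only counts pages in zones at or below
sc->reclaim_idx, so an LRU with nothing reclaimable for the current reclaim
context already yields a zero scan count and shrink_inactive_list() is never
asked to scan it. Below is a rough, illustrative sketch of that
eligible-zone-aware size calculation; it is not the kernel's exact code (the
helper name eligible_lru_size is made up here, and the memcg path is
ignored), but it uses the same per-zone counters as the removed helper:

/*
 * Illustrative sketch only, not the kernel's implementation: count LRU
 * pages only in zones that are eligible for the current reclaim
 * (zone index <= reclaim_idx), which is the property get_scan_count()
 * now relies on instead of the removed inactive_reclaimable_pages().
 */
static unsigned long eligible_lru_size(struct lruvec *lruvec,
				       enum lru_list lru, int reclaim_idx)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
	unsigned long size = 0;
	int zid;

	for (zid = 0; zid <= reclaim_idx; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (!managed_zone(zone))
			continue;

		/* Per-zone LRU counter, as read by the removed helper above. */
		size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
	}

	return size;
}

With a size computed this way, get_scan_count() hands shrink_inactive_list()
a scan target of zero whenever no eligible pages exist, which is why the
explicit early return could be dropped.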