Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c | 112
1 file changed, 28 insertions(+), 84 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2836b5373b2e..dcb47074ae03 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -920,7 +920,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
/* Case 1 above */
if (current_is_kswapd() &&
PageReclaim(page) &&
- zone_is_reclaim_writeback(zone)) {
+ test_bit(ZONE_WRITEBACK, &zone->flags)) {
nr_immediate++;
goto keep_locked;
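
This patch's recurring change is dropping the per-flag wrapper helpers in favour of the generic atomic bitops. For reference, the removed wrappers were thin inlines in include/linux/mmzone.h along these lines (a sketch of the pre-patch tree; the exact wrapper set is an assumption):

/* Pre-patch style: one inline wrapper per zone flag operation */
static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
	set_bit(flag, &zone->flags);
}

static inline int zone_is_reclaim_writeback(struct zone *zone)
{
	return test_bit(ZONE_WRITEBACK, &zone->flags);
}

Post-patch, call sites use test_bit()/set_bit()/clear_bit() on zone->flags directly, which reads just as well and removes a layer of indirection.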
@@ -1002,7 +1002,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
*/
if (page_is_file_cache(page) &&
(!current_is_kswapd() ||
- !zone_is_reclaim_dirty(zone))) {
+ !test_bit(ZONE_DIRTY, &zone->flags))) {
/*
* Immediately reclaim when written back.
* Similar in principle to deactivate_page()
@@ -1563,7 +1563,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
* are encountered in the nr_immediate check below.
*/
if (nr_writeback && nr_writeback == nr_taken)
- zone_set_flag(zone, ZONE_WRITEBACK);
+ set_bit(ZONE_WRITEBACK, &zone->flags);
/*
* memcg will stall in page writeback so only consider forcibly
@@ -1575,16 +1575,16 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
* backed by a congested BDI and wait_iff_congested will stall.
*/
if (nr_dirty && nr_dirty == nr_congested)
- zone_set_flag(zone, ZONE_CONGESTED);
+ set_bit(ZONE_CONGESTED, &zone->flags);
/*
* If dirty pages are scanned that are not queued for IO, it
* implies that flushers are not keeping up. In this case, flag
- * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
- * pages from reclaim context.
+ * the zone ZONE_DIRTY and kswapd will start writing pages from
+ * reclaim context.
*/
if (nr_unqueued_dirty == nr_taken)
- zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
+ set_bit(ZONE_DIRTY, &zone->flags);
/*
* If kswapd scans pages marked for immediate
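
Not visible in this file: the flag itself is evidently renamed from ZONE_TAIL_LRU_DIRTY to ZONE_DIRTY, which would happen in the zone flags enum in include/linux/mmzone.h as part of the same series. A plausible post-patch shape of that enum, inferred from the call sites above (treat the exact names and comments as assumptions):

enum zone_flags {
	ZONE_RECLAIM_LOCKED,	/* prevents concurrent reclaim */
	ZONE_CONGESTED,		/* many dirty pages backed by a congested BDI */
	ZONE_DIRTY,		/* many dirty pages at the tail of the LRU */
	ZONE_WRITEBACK,		/* many pages under writeback at the tail of the LRU */
};

set_bit(), clear_bit() and test_and_set_bit() are the kernel's atomic bitops, so the open-coded calls keep the same concurrency guarantees the wrappers had.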
@@ -2315,7 +2315,10 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc)
return reclaimable;
}
-/* Returns true if compaction should go ahead for a high-order request */
+/*
+ * Returns true if compaction should go ahead for a high-order request, or
+ * if the high-order allocation would succeed without compaction.
+ */
static inline bool compaction_ready(struct zone *zone, int order)
{
unsigned long balance_gap, watermark;
@@ -2339,8 +2342,11 @@ static inline bool compaction_ready(struct zone *zone, int order)
if (compaction_deferred(zone, order))
return watermark_ok;
- /* If compaction is not ready to start, keep reclaiming */
- if (!compaction_suitable(zone, order))
+ /*
+ * If compaction is not ready to start and allocation is not likely
+ * to succeed without it, then keep reclaiming.
+ */
+ if (compaction_suitable(zone, order) == COMPACT_SKIPPED)
return false;
return watermark_ok;
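
compaction_suitable() returns one of the COMPACT_* codes rather than a boolean, so spelling out the COMPACT_SKIPPED comparison states exactly which case should keep reclaiming: compaction cannot start for lack of free pages. Around this time the codes were plain defines roughly like the following (names and values from memory and may not match this exact tree; treat as an assumption):

/* include/linux/compaction.h, approximate sketch */
#define COMPACT_DEFERRED	0	/* compaction deferred due to recent failures */
#define COMPACT_SKIPPED		1	/* too little free memory; reclaim more first */
#define COMPACT_CONTINUE	2	/* compaction can proceed */
#define COMPACT_PARTIAL		3	/* allocation should already succeed */
#define COMPACT_COMPLETE	4	/* the whole zone was scanned */

If COMPACT_SKIPPED is indeed non-zero in this tree, the old !compaction_suitable() test could never fire, which would make this a correctness fix rather than a cosmetic cleanup; either way the explicit comparison is robust against the values being reorganized.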
@@ -2753,21 +2759,22 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
}
unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
+ unsigned long nr_pages,
gfp_t gfp_mask,
- bool noswap)
+ bool may_swap)
{
struct zonelist *zonelist;
unsigned long nr_reclaimed;
int nid;
struct scan_control sc = {
- .nr_to_reclaim = SWAP_CLUSTER_MAX,
+ .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
.target_mem_cgroup = memcg,
.priority = DEF_PRIORITY,
.may_writepage = !laptop_mode,
.may_unmap = 1,
- .may_swap = !noswap,
+ .may_swap = may_swap,
};
/*
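
The new signature moves two policy decisions out to the callers in mm/memcontrol.c (not part of this diff): how many pages to target and whether swap may be used. A hypothetical call site, purely to show the signature in use; memcg, nr_pages and the GFP mask here are illustrative:

	/* Try to reclaim at least nr_pages charged to this memcg, allowing swap */
	unsigned long reclaimed;

	reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages,
						 GFP_KERNEL, true);

Note that nr_to_reclaim is still clamped to at least SWAP_CLUSTER_MAX above, so small requests reclaim a full batch.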
@@ -2818,7 +2825,7 @@ static bool zone_balanced(struct zone *zone, int order,
return false;
if (IS_ENABLED(CONFIG_COMPACTION) && order &&
- !compaction_suitable(zone, order))
+ compaction_suitable(zone, order) == COMPACT_SKIPPED)
return false;
return true;
@@ -2978,7 +2985,7 @@ static bool kswapd_shrink_zone(struct zone *zone,
/* Account for the number of pages attempted to reclaim */
*nr_attempted += sc->nr_to_reclaim;
- zone_clear_flag(zone, ZONE_WRITEBACK);
+ clear_bit(ZONE_WRITEBACK, &zone->flags);
/*
* If a zone reaches its high watermark, consider it to be no longer
@@ -2988,8 +2995,8 @@ static bool kswapd_shrink_zone(struct zone *zone,
*/
if (zone_reclaimable(zone) &&
zone_balanced(zone, testorder, 0, classzone_idx)) {
- zone_clear_flag(zone, ZONE_CONGESTED);
- zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
+ clear_bit(ZONE_CONGESTED, &zone->flags);
+ clear_bit(ZONE_DIRTY, &zone->flags);
}
return sc->nr_scanned >= sc->nr_to_reclaim;
@@ -3080,8 +3087,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
* If balanced, clear the dirty and congested
* flags
*/
- zone_clear_flag(zone, ZONE_CONGESTED);
- zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
+ clear_bit(ZONE_CONGESTED, &zone->flags);
+ clear_bit(ZONE_DIRTY, &zone->flags);
}
}
@@ -3708,11 +3715,11 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
if (node_state(node_id, N_CPU) && node_id != numa_node_id())
return ZONE_RECLAIM_NOSCAN;
- if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
+ if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags))
return ZONE_RECLAIM_NOSCAN;
ret = __zone_reclaim(zone, gfp_mask, order);
- zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
+ clear_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
if (!ret)
count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
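
ZONE_RECLAIM_LOCKED serves as a one-bit trylock here: test_and_set_bit() atomically sets the bit and returns its previous value, so at most one task runs __zone_reclaim() per zone, and losers bail out with ZONE_RECLAIM_NOSCAN instead of waiting. The same pattern in isolation (BUSY_BIT and the function are illustrative, not from the patch):

/* A one-bit trylock built from atomic bitops */
static int do_exclusive_work(unsigned long *flags)
{
	if (test_and_set_bit(BUSY_BIT, flags))
		return -EBUSY;		/* another task got there first */

	/* ... work that must not run concurrently ... */

	clear_bit(BUSY_BIT, flags);	/* release */
	return 0;
}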
@@ -3791,66 +3798,3 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
}
}
#endif /* CONFIG_SHMEM */
-
-static void warn_scan_unevictable_pages(void)
-{
- printk_once(KERN_WARNING
- "%s: The scan_unevictable_pages sysctl/node-interface has been "
- "disabled for lack of a legitimate use case. If you have "
- "one, please send an email to linux-mm@kvack.org.\n",
- current->comm);
-}
-
-/*
- * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of
- * all nodes' unevictable lists for evictable pages
- */
-unsigned long scan_unevictable_pages;
-
-int scan_unevictable_handler(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *length, loff_t *ppos)
-{
- warn_scan_unevictable_pages();
- proc_doulongvec_minmax(table, write, buffer, length, ppos);
- scan_unevictable_pages = 0;
- return 0;
-}
-
-#ifdef CONFIG_NUMA
-/*
- * per node 'scan_unevictable_pages' attribute. On demand re-scan of
- * a specified node's per zone unevictable lists for evictable pages.
- */
-
-static ssize_t read_scan_unevictable_node(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- warn_scan_unevictable_pages();
- return sprintf(buf, "0\n"); /* always zero; should fit... */
-}
-
-static ssize_t write_scan_unevictable_node(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- warn_scan_unevictable_pages();
- return 1;
-}
-
-
-static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
- read_scan_unevictable_node,
- write_scan_unevictable_node);
-
-int scan_unevictable_register_node(struct node *node)
-{
- return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages);
-}
-
-void scan_unevictable_unregister_node(struct node *node)
-{
- device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
-}
-#endif