author		Mel Gorman <mgorman@techsingularity.net>	2016-07-29 01:47:29 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-29 02:07:41 +0300
commit		bb4cc2bea6df7854d629bff114ca03237cc718d6 (patch)
tree		5f164e808a79a8e84029a7d2a2772cf8c02219b2 /mm
parent		71c799f4982d340fff86e751898841322f07f235 (diff)
download	linux-bb4cc2bea6df7854d629bff114ca03237cc718d6.tar.xz
mm, vmscan: remove highmem_file_pages
With the reintroduction of per-zone LRU stats, highmem_file_pages is
redundant so remove it.

[mgorman@techsingularity.net: wrong stat is being accumulated in highmem_dirtyable_memory]
Link: http://lkml.kernel.org/r/20160725092324.GM10438@techsingularity.net
Link: http://lkml.kernel.org/r/1469110261-7365-3-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
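For reference, the effect of the change is that highmem_dirtyable_memory() now accumulates its estimate per zone directly from the per-zone counters (NR_FREE_PAGES above the high watermark, plus NR_ZONE_INACTIVE_FILE and NR_ZONE_ACTIVE_FILE), instead of adding the removed global highmem_file_pages counter at the end. The following is a minimal, standalone userspace sketch of that accumulation only; struct zone_sample, min_ul() and the sample numbers are illustration-only assumptions, not kernel APIs, and the real function additionally clamps the result against the total passed in.

/*
 * Illustrative sketch only (not kernel code): models the per-zone
 * accumulation highmem_dirtyable_memory() performs after this patch.
 * In the kernel the inputs come from zone_page_state() and
 * high_wmark_pages(); here they are made-up sample values.
 */
#include <stdio.h>

struct zone_sample {
	unsigned long nr_free;		/* NR_FREE_PAGES */
	unsigned long high_wmark;	/* high_wmark_pages(z) */
	unsigned long nr_inactive_file;	/* NR_ZONE_INACTIVE_FILE */
	unsigned long nr_active_file;	/* NR_ZONE_ACTIVE_FILE */
};

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Two hypothetical highmem zones. */
	struct zone_sample zones[] = {
		{ .nr_free = 5000, .high_wmark = 1200,
		  .nr_inactive_file = 3000, .nr_active_file = 1500 },
		{ .nr_free = 800,  .high_wmark = 1000,	/* below watermark */
		  .nr_inactive_file = 400,  .nr_active_file = 100 },
	};
	unsigned long x = 0;
	unsigned int i;

	for (i = 0; i < sizeof(zones) / sizeof(zones[0]); i++) {
		unsigned long nr_pages = zones[i].nr_free;

		/* watch for underflows, as in the kernel code */
		nr_pages -= min_ul(nr_pages, zones[i].high_wmark);
		/* per-zone file LRU stats replace the old global counter */
		nr_pages += zones[i].nr_inactive_file;
		nr_pages += zones[i].nr_active_file;
		x += nr_pages;
	}

	printf("dirtyable highmem pages: %lu\n", x);
	return 0;
}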
Diffstat (limited to 'mm')
-rw-r--r--	mm/page-writeback.c	12
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 573d138fa7a5..7b5920a3500f 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -299,17 +299,13 @@ static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
 	return nr_pages;
 }
-#ifdef CONFIG_HIGHMEM
-atomic_t highmem_file_pages;
-#endif
 static unsigned long highmem_dirtyable_memory(unsigned long total)
 {
 #ifdef CONFIG_HIGHMEM
 	int node;
-	unsigned long x;
+	unsigned long x = 0;
 	int i;
-	unsigned long dirtyable = 0;
 	for_each_node_state(node, N_HIGH_MEMORY) {
 		for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
@@ -326,12 +322,12 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 			nr_pages = zone_page_state(z, NR_FREE_PAGES);
 			/* watch for underflows */
 			nr_pages -= min(nr_pages, high_wmark_pages(z));
-			dirtyable += nr_pages;
+			nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
+			nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
+			x += nr_pages;
 		}
 	}
-	x = dirtyable + atomic_read(&highmem_file_pages);
-
 	/*
 	 * Unreclaimable memory (kernel memory or anonymous memory
 	 * without swap) can bring down the dirtyable pages below