author    Christoph Lameter <clameter@sgi.com>   2006-06-30 12:55:37 +0400
committer Linus Torvalds <torvalds@g5.osdl.org> 2006-06-30 22:25:35 +0400
commit    34aa1330f9b3c5783d269851d467326525207422 (patch)
tree      a47db4fa53527ea937dee9e763267ab21865ce11 /mm
parent    f3dbd34460ff54962d3e3244b6bcb7f5295356e6 (diff)
[PATCH] zoned vm counters: zone_reclaim: remove /proc/sys/vm/zone_reclaim_interval
The zone_reclaim_interval was necessary because we were not able to
determine how many unmapped pages exist in a zone.  Therefore we had to
scan in intervals to figure out if any pages were unmapped.

With the zoned counters and NR_ANON_PAGES we now know the number of
pagecache pages and the number of mapped pages in a zone.  So we can
simply skip the reclaim if there is an insufficient number of unmapped
pages.  We use SWAP_CLUSTER_MAX as the boundary.

Drop all support for /proc/sys/vm/zone_reclaim_interval.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
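For reference, the heuristic this patch adds boils down to the check
sketched below.  This is a minimal sketch, not code as committed:
zone_page_state(), NR_FILE_PAGES, NR_FILE_MAPPED and SWAP_CLUSTER_MAX
all exist in this series (see the vmscan.c hunks that follow), but the
wrapper function and its name are hypothetical.

	static int zone_has_enough_unmapped_pagecache(struct zone *zone)
	{
		/* Unmapped pagecache pages are the easily reclaimable ones. */
		long unmapped = zone_page_state(zone, NR_FILE_PAGES) -
				zone_page_state(zone, NR_FILE_MAPPED);

		/*
		 * Both per-zone counters may be temporarily off a bit, so
		 * SWAP_CLUSTER_MAX doubles as slack for that error.
		 */
		return unmapped >= SWAP_CLUSTER_MAX;
	}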
Diffstat (limited to 'mm')
-rw-r--r--  mm/vmscan.c  31
1 file changed, 10 insertions(+), 21 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2f0390161c0e..0960846d649f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1518,11 +1518,6 @@ int zone_reclaim_mode __read_mostly;
 #define RECLAIM_SLAB (1<<3)	/* Do a global slab shrink if the zone is out of memory */
 
 /*
- * Mininum time between zone reclaim scans
- */
-int zone_reclaim_interval __read_mostly = 30*HZ;
-
-/*
  * Priority for ZONE_RECLAIM. This determines the fraction of pages
  * of a node considered for each zone_reclaim. 4 scans 1/16th of
  * a zone.
@@ -1587,16 +1582,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 
 	p->reclaim_state = NULL;
 	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
-
-	if (nr_reclaimed == 0) {
-		/*
-		 * We were unable to reclaim enough pages to stay on node. We
-		 * now allow off node accesses for a certain time period before
-		 * trying again to reclaim pages from the local zone.
-		 */
-		zone->last_unsuccessful_zone_reclaim = jiffies;
-	}
-
 	return nr_reclaimed >= nr_pages;
 }
 
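The timestamp write deleted above is the producer half of the retired
backoff; its consumer is the time_before() test deleted in the next
hunk.  Pieced together from the removed lines, the old mechanism was,
in sketch form:

	/* In __zone_reclaim(): a failed scan stamps the zone ... */
	if (nr_reclaimed == 0)
		zone->last_unsuccessful_zone_reclaim = jiffies;

	/*
	 * ... and in zone_reclaim(): for the next zone_reclaim_interval
	 * (30*HZ) jiffies, every allocation goes off-node unscanned.
	 */
	if (time_before(jiffies,
		zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval))
		return 0;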
@@ -1606,13 +1591,17 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	int node_id;
 
 	/*
-	 * Do not reclaim if there was a recent unsuccessful attempt at zone
-	 * reclaim. In that case we let allocations go off node for the
-	 * zone_reclaim_interval. Otherwise we would scan for each off-node
-	 * page allocation.
+	 * Do not reclaim if there are not enough reclaimable pages in this
+	 * zone that would satify this allocations.
+	 *
+	 * All unmapped pagecache pages are reclaimable.
+	 *
+	 * Both counters may be temporarily off a bit so we use
+	 * SWAP_CLUSTER_MAX as the boundary. It may also be good to
+	 * leave a few frequently used unmapped pagecache pages around.
 	 */
-	if (time_before(jiffies,
-		zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval))
+	if (zone_page_state(zone, NR_FILE_PAGES) -
+		zone_page_state(zone, NR_FILE_MAPPED) < SWAP_CLUSTER_MAX)
 		return 0;
 
 	/*
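To make the new cutoff concrete, a worked example (the counter values
are invented; SWAP_CLUSTER_MAX is 32 in kernels of this era):

	/*
	 * NR_FILE_PAGES = 1000, NR_FILE_MAPPED = 990
	 *   -> 1000 - 990 = 10 < 32:  zone_reclaim() returns 0 and the
	 *      allocation falls back to another node without scanning
	 *      LRU lists that hold almost nothing reclaimable.
	 *
	 * NR_FILE_PAGES = 1000, NR_FILE_MAPPED = 950
	 *   -> 1000 - 950 = 50 >= 32: __zone_reclaim() is attempted.
	 */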