author	Christoph Lameter <clameter@engr.sgi.com>	2006-03-22 11:08:45 +0300
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-22 18:54:02 +0300
commit	6e5ef1a96e6e3b123da56292bc35017c8c401491 (patch)
tree	db9532c53013bccd7ac9d186784bbc6fd343bca4 /mm/vmscan.c
parent	a7290ee08e434399660ace34427c17696e47c562 (diff)
download	linux-6e5ef1a96e6e3b123da56292bc35017c8c401491.tar.xz
[PATCH] vmscan: remove obsolete checks from shrink_list() and fix unlikely in refill_inactive_zone()
As suggested by Marcelo:

1. The optimization introduced recently for not calling page_referenced()
   during zone reclaim makes two additional checks in shrink_list unnecessary.

2. The if (unlikely(sc->may_swap)) in refill_inactive_zone is optimized for
   the zone_reclaim case.  However, most people's systems only do swap.
   Undo that.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Marcelo Tosatti <marcelo.tosatti@cyclades.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
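[Editor's note on point 2: unlikely() is a branch-prediction hint built on GCC's __builtin_expect(), telling the compiler to lay out code assuming the condition is usually false. Because sc->may_swap is true on most configurations, that hint pessimizes the common path, which is why the patch drops it. Below is a minimal standalone sketch of the macro mechanics, illustrative only; the kernel's real definitions live in include/linux/compiler.h, and may_swap here is just a local stand-in variable.]

#include <stdio.h>

/* Standalone approximations of the kernel's hint macros (illustrative only). */
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

int main(void)
{
	int may_swap = 1;	/* swap is enabled on most systems */

	/*
	 * unlikely() asks the compiler to optimize for the condition being
	 * false.  Since may_swap is normally true, the hint favors the rare
	 * (zone reclaim) case at the expense of the common one.
	 */
	if (unlikely(may_swap))
		printf("recalculating reclaim_mapped\n");

	return 0;
}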
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	13
1 file changed, 2 insertions, 11 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3914a94aa905..f713e9f6ac73 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -460,12 +460,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * Anonymous process memory has backing store?
 		 * Try to allocate it some swap space here.
 		 */
-		if (PageAnon(page) && !PageSwapCache(page)) {
-			if (!sc->may_swap)
-				goto keep_locked;
+		if (PageAnon(page) && !PageSwapCache(page))
 			if (!add_to_swap(page, GFP_ATOMIC))
 				goto activate_locked;
-		}
 #endif /* CONFIG_SWAP */
 
 		mapping = page_mapping(page);
@@ -477,12 +474,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * processes. Try to unmap it here.
 		 */
 		if (page_mapped(page) && mapping) {
-			/*
-			 * No unmapping if we do not swap
-			 */
-			if (!sc->may_swap)
-				goto keep_locked;
-
 			switch (try_to_unmap(page, 0)) {
 			case SWAP_FAIL:
 				goto activate_locked;
@@ -1205,7 +1196,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	struct pagevec pvec;
 	int reclaim_mapped = 0;
 
-	if (unlikely(sc->may_swap)) {
+	if (sc->may_swap) {
 		long mapped_ratio;
 		long distress;
 		long swap_tendency;