author    Jianyu Zhan <nasa4836@gmail.com>  2014-06-05 03:10:38 +0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-06-05 03:54:11 +0400
commit    4be89a34609659042ef0bf883ad76388fb5251bb (patch)
tree      7a438185cb78e2c6d3aec1e64960e64188cb4afe /mm/vmscan.c
parent    b7596fb43aa786fb3ee5015a73034fbb9e80feaa (diff)
download  linux-4be89a34609659042ef0bf883ad76388fb5251bb.tar.xz
mm/vmscan.c: use DIV_ROUND_UP for calculation of zone's balance_gap and correct comments.
Currently, we use (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO - 1) / KSWAPD_ZONE_BALANCE_GAP_RATIO to avoid a zero gap value. It is better to use the DIV_ROUND_UP macro, which gives neater code and a clearer meaning.

Besides, the gap value is calculated against the per-zone "managed pages", not "present pages". This patch also corrects the comments accordingly and does some rephrasing.

Signed-off-by: Jianyu Zhan <nasa4836@gmail.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Rafael Aquini <aquini@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
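As a standalone user-space sketch (not kernel code): DIV_ROUND_UP(n, d) is defined in include/linux/kernel.h as ((n) + (d) - 1) / (d), so the new expression computes exactly the same rounded-up quotient as the old open-coded version. In the snippet below, GAP_RATIO stands in for KSWAPD_ZONE_BALANCE_GAP_RATIO (the 1% ratio referred to in the in-tree comment), and the managed_pages value is purely hypothetical.

/*
 * Standalone illustration: DIV_ROUND_UP, spelled the same way as the
 * kernel's macro, matches the hand-rolled round-up division that the
 * old balance_gap computation used.
 */
#include <assert.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Stand-in for KSWAPD_ZONE_BALANCE_GAP_RATIO (1% of the zone). */
#define GAP_RATIO		100

int main(void)
{
	unsigned long managed_pages = 123456;	/* hypothetical zone size */

	/* Old open-coded form, as removed by the patch. */
	unsigned long old_gap = (managed_pages + GAP_RATIO - 1) / GAP_RATIO;

	/* New form using the macro, as added by the patch. */
	unsigned long new_gap = DIV_ROUND_UP(managed_pages, GAP_RATIO);

	assert(old_gap == new_gap);	/* identical result, clearer intent */
	printf("balance gap component: %lu pages\n", new_gap);
	return 0;
}

The change is therefore purely cosmetic: the generated value (and hence kswapd/compaction behaviour) is unchanged, only the readability of the expression improves.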
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  10
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 494cd632178c..cc29fca8d989 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2295,9 +2295,8 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
* there is a buffer of free pages available to give compaction
* a reasonable chance of completing and allocating the page
*/
- balance_gap = min(low_wmark_pages(zone),
- (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
- KSWAPD_ZONE_BALANCE_GAP_RATIO);
+ balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP(
+ zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO));
watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
@@ -2949,9 +2948,8 @@ static bool kswapd_shrink_zone(struct zone *zone,
* high wmark plus a "gap" where the gap is either the low
* watermark or 1% of the zone, whichever is smaller.
*/
- balance_gap = min(low_wmark_pages(zone),
- (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
- KSWAPD_ZONE_BALANCE_GAP_RATIO);
+ balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP(
+ zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO));
/*
* If there is no low memory pressure or the zone is balanced then no