author     David Rientjes <rientjes@google.com>            2009-01-07 01:39:29 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-01-07 02:59:02 +0300
commit     364aeb2849789b51bf4b9af2ddd02fee7285c54e (patch)
tree       4a24ea43a2a76ae744571f3c7b5022aa1107599e /mm/page-writeback.c
parent     58a01a45721bf7bd3a41a86248c3cb02a6b0c501 (diff)
download   linux-364aeb2849789b51bf4b9af2ddd02fee7285c54e.tar.xz
mm: change dirty limit type specifiers to unsigned long
The background dirty and dirty limits are better defined with type specifiers
of unsigned long since negative writeback thresholds are not possible.  These
values, as returned by get_dirty_limits(), are normally compared with ZVC
values to determine whether writeback shall commence or be throttled.  Such
page counts cannot be negative, so declaring the page limits as signed is
unnecessary.

Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Andrea Righi <righi.andrea@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
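For illustration only, here is a minimal standalone C sketch of the calling
pattern this patch tidies up.  It is not kernel code: the stubbed
get_dirty_limits(), the hard-coded memory size, and the example ratios below
are placeholders.  The point it demonstrates is that the thresholds live in
unsigned long locals and are compared directly against page counts, which can
never be negative, so no signed/unsigned mixing occurs.

#include <stdio.h>

/*
 * Userspace stand-in for the kernel's get_dirty_limits(): writes the
 * background and dirty thresholds through unsigned long out-parameters,
 * mirroring the new prototype.  Values are made-up placeholders.
 */
static void get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	unsigned long available_memory = 100000UL;	/* pages, placeholder */

	*pbackground = available_memory * 10 / 100;	/* 10% background ratio */
	*pdirty = available_memory * 40 / 100;		/* 40% dirty ratio */
}

int main(void)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long nr_dirty = 12000UL;	/* placeholder for a ZVC page count */

	get_dirty_limits(&background_thresh, &dirty_thresh);

	/*
	 * Both sides of the comparison are unsigned long, so a negative
	 * threshold cannot even be represented.
	 */
	if (nr_dirty > background_thresh)
		printf("dirty pages (%lu) exceed background threshold (%lu)\n",
		       nr_dirty, background_thresh);
	else
		printf("below background threshold (%lu of %lu pages)\n",
		       nr_dirty, background_thresh);

	return 0;
}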
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--  mm/page-writeback.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 08d2b960b294..4d4074cff300 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -362,13 +362,13 @@ unsigned long determine_dirtyable_memory(void)
 }
 
 void
-get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
-		 struct backing_dev_info *bdi)
+get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
+		 unsigned long *pbdi_dirty, struct backing_dev_info *bdi)
 {
 	int background_ratio;		/* Percentages */
 	int dirty_ratio;
-	long background;
-	long dirty;
+	unsigned long background;
+	unsigned long dirty;
 	unsigned long available_memory = determine_dirtyable_memory();
 	struct task_struct *tsk;
 
@@ -423,9 +423,9 @@ static void balance_dirty_pages(struct address_space *mapping)
 {
 	long nr_reclaimable, bdi_nr_reclaimable;
 	long nr_writeback, bdi_nr_writeback;
-	long background_thresh;
-	long dirty_thresh;
-	long bdi_thresh;
+	unsigned long background_thresh;
+	unsigned long dirty_thresh;
+	unsigned long bdi_thresh;
 	unsigned long pages_written = 0;
 	unsigned long write_chunk = sync_writeback_pages();
 
@@ -580,8 +580,8 @@ EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
 
 void throttle_vm_writeout(gfp_t gfp_mask)
 {
-	long background_thresh;
-	long dirty_thresh;
+	unsigned long background_thresh;
+	unsigned long dirty_thresh;
 
 	for ( ; ; ) {
 		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
@@ -624,8 +624,8 @@ static void background_writeout(unsigned long _min_pages)
 	};
 
 	for ( ; ; ) {
-		long background_thresh;
-		long dirty_thresh;
+		unsigned long background_thresh;
+		unsigned long dirty_thresh;
 
 		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
 		if (global_page_state(NR_FILE_DIRTY) +