path: root/fs/jbd/checkpoint.c
author		Christoph Hellwig <hch@lst.de>	2010-08-11 19:06:24 +0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2010-08-18 09:09:01 +0400
commit		9cb569d601e0b93e01c20a22872270ec663b75f6 (patch)
tree		80b2568fae48018806e82f8884062dae8a5494ae /fs/jbd/checkpoint.c
parent		87e99511ea54510ffb60b98001d108794d5037f8 (diff)
remove SWRITE* I/O types
These flags aren't real I/O types, but tell ll_rw_block to always lock the buffer instead of giving up on a failed trylock.

Instead add a new write_dirty_buffer helper that implements this semantic and use it from the existing SWRITE* callers. Note that the ll_rw_block code had a bug where it didn't promote WRITE_SYNC_PLUG properly, which this patch fixes.

In the ufs code clean up the helper that used to call ll_rw_block to mirror sync_dirty_buffer, which is the function it implements for compound buffers.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
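A minimal sketch of the semantic the commit message describes for the new helper, written against the buffer_head API of this kernel era; the actual fs/buffer.c implementation of write_dirty_buffer may differ in detail, and the _sketch name is used here only to make clear this is an illustration:

#include <linux/buffer_head.h>

/* Sketch only: block on the buffer lock rather than trylocking, then
 * submit the buffer for write-out if it is still dirty once we own it. */
static void write_dirty_buffer_sketch(struct buffer_head *bh, int rw)
{
	lock_buffer(bh);			/* wait; never skip a busy buffer */
	if (!test_clear_buffer_dirty(bh)) {
		unlock_buffer(bh);		/* already cleaned by someone else */
		return;
	}
	bh->b_end_io = end_buffer_write_sync;	/* unlocks the buffer on completion */
	get_bh(bh);				/* reference dropped by the end_io handler */
	submit_bh(rw, bh);
}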
Diffstat (limited to 'fs/jbd/checkpoint.c')
-rw-r--r--  fs/jbd/checkpoint.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index b0435dd0654d..05a38b9c4c0e 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -254,7 +254,9 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
 {
 	int i;
 
-	ll_rw_block(SWRITE, *batch_count, bhs);
+	for (i = 0; i < *batch_count; i++)
+		write_dirty_buffer(bhs[i], WRITE);
+
 	for (i = 0; i < *batch_count; i++) {
 		struct buffer_head *bh = bhs[i];
 		clear_buffer_jwrite(bh);
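For context on why the old call needed a special SWRITE mode at all: plain ll_rw_block() only trylocks each buffer and silently skips any buffer it cannot lock, which is not acceptable when the checkpoint code must guarantee the write-out. A simplified per-buffer sketch of that legacy WRITE path (not the exact kernel code; the helper name is invented for illustration):

#include <linux/buffer_head.h>

/* Simplified sketch of what ll_rw_block(WRITE, ...) does for one buffer. */
static void ll_rw_block_write_one_sketch(struct buffer_head *bh)
{
	if (!trylock_buffer(bh))
		return;				/* busy buffer is silently skipped */
	if (test_clear_buffer_dirty(bh)) {
		bh->b_end_io = end_buffer_write_sync;
		get_bh(bh);
		submit_bh(WRITE, bh);		/* unlocked when the I/O completes */
	} else {
		unlock_buffer(bh);		/* clean buffer: nothing to write */
	}
}

This is why the hunk above replaces the single ll_rw_block(SWRITE, ...) call with a loop calling write_dirty_buffer() on each buffer: the per-buffer helper blocks on the lock, preserving the guarantee that SWRITE used to provide.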