author    Jens Axboe <jaxboe@fusionio.com>    2011-03-10 10:52:07 +0300
committer Jens Axboe <jaxboe@fusionio.com>    2011-03-10 10:52:07 +0300
commit    7eaceaccab5f40bbfda044629a6298616aeaed50 (patch)
tree      33954d12f63e25a47eb6d86ef3d3d0a5e62bf752 /fs/buffer.c
parent    73c101011926c5832e6e141682180c4debe2cf45 (diff)
download  linux-7eaceaccab5f40bbfda044629a6298616aeaed50.tar.xz
block: remove per-queue plugging
Code has been converted over to the new explicit on-stack plugging, and delay users have been converted to use the new API for that. So let's kill off the old plugging along with aops->sync_page().

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
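For readers unfamiliar with the replacement, here is a minimal, illustrative sketch of the explicit on-stack plugging the message refers to. struct blk_plug and blk_start_plug()/blk_finish_plug() are the real interfaces from <linux/blkdev.h> introduced by this series; the submit_batch() helper and its arguments are hypothetical and only show the calling pattern, they are not part of this commit.

/*
 * Illustrative sketch only: batching I/O with the explicit on-stack
 * plugging API that replaces per-queue plugging. submit_batch() is a
 * hypothetical caller invented for this example.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>

static void submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* start collecting requests on this task */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);	/* queued on the plug list, not yet dispatched */
	blk_finish_plug(&plug);		/* flush the plug list down to the driver */
}

With the plug held on the caller's stack, submitted requests are batched per task and flushed when the plug is finished (or when the task schedules), which is what makes the old per-queue unplug hooks such as block_sync_page() unnecessary.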
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c	31
1 file changed, 4 insertions, 27 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 2219a76e2caf..f903f2e5b4fe 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -54,23 +54,15 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
 }
 EXPORT_SYMBOL(init_buffer);

-static int sync_buffer(void *word)
+static int sleep_on_buffer(void *word)
 {
-	struct block_device *bd;
-	struct buffer_head *bh
-		= container_of(word, struct buffer_head, b_state);
-
-	smp_mb();
-	bd = bh->b_bdev;
-	if (bd)
-		blk_run_address_space(bd->bd_inode->i_mapping);
 	io_schedule();
 	return 0;
 }

 void __lock_buffer(struct buffer_head *bh)
 {
-	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
+	wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
 							TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
@@ -90,7 +82,7 @@ EXPORT_SYMBOL(unlock_buffer);
  */
 void __wait_on_buffer(struct buffer_head * bh)
 {
-	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
+	wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__wait_on_buffer);
@@ -749,7 +741,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
 	struct buffer_head *bh;
 	struct list_head tmp;
-	struct address_space *mapping, *prev_mapping = NULL;
+	struct address_space *mapping;
 	int err = 0, err2;

 	INIT_LIST_HEAD(&tmp);
@@ -783,10 +775,6 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * wait_on_buffer() will do that for us
 				 * through sync_buffer().
 				 */
-				if (prev_mapping && prev_mapping != mapping)
-					blk_run_address_space(prev_mapping);
-				prev_mapping = mapping;
-
 				brelse(bh);
 				spin_lock(lock);
 			}
@@ -3138,17 +3126,6 @@ out:
 }
 EXPORT_SYMBOL(try_to_free_buffers);

-void block_sync_page(struct page *page)
-{
-	struct address_space *mapping;
-
-	smp_mb();
-	mapping = page_mapping(page);
-	if (mapping)
-		blk_run_backing_dev(mapping->backing_dev_info, page);
-}
-EXPORT_SYMBOL(block_sync_page);
-
 /*
  * There are no bdflush tunables left. But distributions are
  * still running obsolete flush daemons, so we terminate them here.