path: root/fs/xfs/xfs_itable.c
author Dave Chinner <dchinner@redhat.com> 2013-06-27 10:04:46 +0400
committer Ben Myers <bpm@sgi.com> 2013-06-27 22:26:23 +0400
commit cbb2864aa48977205c76291ba5a23331393b2578 (patch)
tree 7cf9ee1115ee7032294c3ed5f335abb9f8bc8ff4 /fs/xfs/xfs_itable.c
parent 80a4049813a2ae0977d8e5db78e711c7f21c420b (diff)
download linux-cbb2864aa48977205c76291ba5a23331393b2578.tar.xz
xfs: add plugging for bulkstat readahead
I was running some tests on bulkstat on CRC enabled filesystems when I noticed that all the IO being issued was 8k in size, regardless of the fact that we are issuing sequential 8k buffers for inode clusters. The IO size should be 16k for 256 byte inodes, and 32k for 512 byte inodes, but this wasn't happening.

blktrace showed that there was an explicit plug and unplug happening around each readahead IO from _xfs_buf_ioapply, and the unplug was causing the IO to be issued immediately. Hence no opportunity was being given to the elevator to merge adjacent readahead requests and dispatch them as a single IO.

Add plugging around the inode chunk readahead dispatch loop in bulkstat to ensure that we don't unplug the queue between adjacent inode buffer readahead IOs, and so we get fewer, larger IO requests hitting the storage subsystem for bulkstat.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
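For reference, a minimal sketch of the plugging pattern this patch applies (not the actual bulkstat code: readahead_inode_chunk() and chunk_readahead_one() are hypothetical stand-ins for the bulkstat loop and its per-cluster readahead call; only blk_start_plug()/blk_finish_plug() are the real block-layer API):

#include <linux/blkdev.h>

/* Hypothetical stand-in for the per-cluster readahead issued inside the loop. */
static void chunk_readahead_one(int cluster);

static void readahead_inode_chunk(int nclusters)
{
	struct blk_plug plug;
	int cluster;

	/*
	 * Hold a plug across the whole dispatch loop so the readahead
	 * bios accumulate on the per-task plug list instead of being
	 * sent to the device one at a time.
	 */
	blk_start_plug(&plug);

	for (cluster = 0; cluster < nclusters; cluster++)
		chunk_readahead_one(cluster);

	/*
	 * On unplug the elevator can merge the adjacent requests, so
	 * the storage sees fewer, larger IOs (e.g. one 16k or 32k read
	 * rather than several 8k reads).
	 */
	blk_finish_plug(&plug);
}

The key point is that the plug spans the whole readahead dispatch loop rather than each individual IO, which is what gives the elevator the chance to merge adjacent requests before dispatch.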
Diffstat (limited to 'fs/xfs/xfs_itable.c')
-rw-r--r--  fs/xfs/xfs_itable.c  3
1 file changed, 3 insertions, 0 deletions
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 2ea7d402188d..06d004d85bf4 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -383,11 +383,13 @@ xfs_bulkstat(
* Also start read-ahead now for this chunk.
*/
if (r.ir_freecount < XFS_INODES_PER_CHUNK) {
+ struct blk_plug plug;
/*
* Loop over all clusters in the next chunk.
* Do a readahead if there are any allocated
* inodes in that cluster.
*/
+ blk_start_plug(&plug);
agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino);
for (chunkidx = 0;
chunkidx < XFS_INODES_PER_CHUNK;
@@ -399,6 +401,7 @@ xfs_bulkstat(
agbno, nbcluster,
&xfs_inode_buf_ops);
}
+ blk_finish_plug(&plug);
irbp->ir_startino = r.ir_startino;
irbp->ir_freecount = r.ir_freecount;
irbp->ir_free = r.ir_free;