author    Liu Bo <bo.li.liu@oracle.com>    2012-08-23 06:10:38 +0400
committer Chris Mason <chris.mason@oracle.com>    2012-08-29 00:53:41 +0400
commit    24c03fa5cf3d02c327cf9f2fc39f72664b1bd7e1
tree      b0e14cbc951996a4b33fa81e7a19d092e08398d5 /fs/btrfs
parent    bd7de2c9a449e26a5493d918618eb20ae60d56bd
Btrfs: fix a dio write regression
This bug was introduced by commit 3b8bde746f6f9bd36a9f05f5f3b6e334318176a9 (Btrfs: lock extents as we map them in DIO).

In a dio write, we should unlock the section we did not do IO on, in case we fall back to buffered write. But unlocking the section is not enough: we also need to clean up the reserved space for it.

This bug was found while running xfstests; with this change, test 133 no longer complains.

Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
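To make the range split concrete, here is a small standalone sketch (not kernel code; the file name, values, and printout are invented for illustration) of the arithmetic the patch applies when the mapped extent covers only part of the locked range:

/* range_split.c: illustration only, not part of the patch */
#include <stdio.h>

int main(void)
{
	unsigned long long lockstart = 0;	/* first locked byte */
	unsigned long long lockend = 8191;	/* last locked byte */
	unsigned long long len = 4096;		/* bytes actually mapped for dio */

	if (len < lockend - lockstart) {
		/*
		 * [lockstart, lockstart + len - 1] is written by dio,
		 * so only the lock bits are cleared there.
		 */
		printf("unlock only:         [%llu, %llu]\n",
		       lockstart, lockstart + len - 1);
		/*
		 * [lockstart + len, lockend] falls back to buffered
		 * write, so the patch adds EXTENT_DO_ACCOUNTING when
		 * clearing it to drop the space reserved for that range.
		 */
		printf("unlock + accounting: [%llu, %llu]\n",
		       lockstart + len, lockend);
	}
	return 0;
}

With these values the dio half is [0, 4095] and the buffered-fallback half is [4096, 8191]; only the latter carries EXTENT_DO_ACCOUNTING, which is what releases its reservation.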
Diffstat (limited to 'fs/btrfs')
 fs/btrfs/inode.c | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 0808f483dafa..38cda78de5e4 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5992,11 +5992,27 @@ unlock:
* in the case of read we need to unlock only the end area that we
* aren't using if there is any left over space.
*/
- if (lockstart < lockend)
- clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- unlock_bits, 1, 0, &cached_state, GFP_NOFS);
- else
+ if (lockstart < lockend) {
+ if (create && len < lockend - lockstart) {
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+ lockstart + len - 1, unlock_bits, 1, 0,
+ &cached_state, GFP_NOFS);
+ /*
+ * Besides unlocking, we also need to clean up the reserved
+ * space for the left range by attaching EXTENT_DO_ACCOUNTING.
+ */
+ clear_extent_bit(&BTRFS_I(inode)->io_tree,
+ lockstart + len, lockend,
+ unlock_bits | EXTENT_DO_ACCOUNTING,
+ 1, 0, NULL, GFP_NOFS);
+ } else {
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, unlock_bits, 1, 0,
+ &cached_state, GFP_NOFS);
+ }
+ } else {
free_extent_state(cached_state);
+ }
free_extent_map(em);