author	Linus Torvalds <torvalds@linux-foundation.org>	2020-12-16 05:40:42 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-12-16 05:40:42 +0300
commit	f1ee3b8829006b3fda999f00f0059aa327e3f3d0 (patch)
tree	63c68a2568bc3d28f7b4d4199a5032f8eac748c5 /fs
parent	a725cb4d708e5ac8bc76a70b3002ff64c07312d8 (diff)
parent	b42fe98c92698d2a10094997e5f4d2dd968fd44f (diff)
Merge tag 'for-5.11-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs updates from David Sterba:
 "We have a mix of all kinds of changes, feature updates, core stuff,
  performance improvements and lots of cleanups and preparatory changes.

  User visible:

   - export filesystem generation in sysfs

   - new features for mount option 'rescue':
       - what's currently supported is exported in sysfs
       - 'ignorebadroots'/'ibadroots' - continue even if some essential
         tree roots are not usable (extent, uuid, data reloc, device,
         csum, free space)
       - 'ignoredatacsums'/'idatacsums' - skip checksum verification on
         data
       - 'all' - now enables 'ignorebadroots' + 'ignoredatacsums' +
         'nologreplay'

   - export read mirror policy settings to sysfs, new policies will be
     added in the future

   - remove inode number cache feature (mount -o inode_cache),
     obsoleted in 5.9

  User visible fixes:

   - async discard scheduling fixes on high loads

   - update inode byte counter atomically so stat() does not report
     wrong value in some cases

   - free space tree fixes:
       - correctly report status of v2 after remount
       - clear v1 cache inodes when v2 is newly enabled after remount

  Core:

   - switch own tree lock implementation to standard rw semaphore:
       - one-level lock nesting is not required anymore, the last use
         of this was in free space that's now loaded asynchronously
       - own implementation of adaptive spinning before taking mutex
         has been part of rwsem
       - performance seems to be better in general, much better (+tens
         of percents) for some workloads
       - lockdep does not complain

   - finish direct IO conversion to iomap infrastructure, remove
     temporary workaround for DSYNC after iomap API updates

   - preparatory work to support data and metadata blocks smaller than
     page:
       - generalize code that assumes sectorsize == PAGE_SIZE, lots of
         refactoring
       - planned namely for 64K pages (eg. arm64, ppc64)
       - scrub read-only support

   - preparatory work for zoned allocation mode (SMR/ZBC/ZNS friendly):
       - disable incompatible features
       - round-robin superblock write

   - free space cache (v1) is loaded asynchronously, remove tree path
     recursion

   - slightly improved time tracking for transaction kthread wake ups

  Performance improvements (note that the numbers depend on load type
  or other features and weren't run on the same machine):

   - skip unnecessary work:
       - do not start readahead for csum tree when scrubbing non-data
         block groups
       - do not start and wait for delalloc on snapshot roots on
         transaction commit
       - fix race when defragmenting leads to unnecessary IO

   - dbench speedups (+throughput%/-max latency%):
       - skip unnecessary searches for xattrs when logging an inode
         (+10.8/-8.2)
       - stop incrementing log batch when joining log transaction (1-2)
       - unlock path before checking if extent is shared during nocow
         writeback (+5.0/-20.5), on fio load +9.7% throughput/-9.8%
         runtime
       - several tree log improvements, eg. removing unnecessary
         operations, fixing races that lead to additional work
         (+12.7/-8.2)

   - tree-checker error branches annotated with unlikely() (+3%
     throughput)

  Other:

   - cleanups

   - lockdep fixes

   - more btrfs_inode conversions

   - error variable cleanups"

* tag 'for-5.11-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (198 commits)
  btrfs: scrub: allow scrub to work with subpage sectorsize
  btrfs: scrub: support subpage data scrub
  btrfs: scrub: support subpage tree block scrub
  btrfs: scrub: always allocate one full page for one sector for RAID56
  btrfs: scrub: reduce width of extent_len/stripe_len from 64 to 32 bits
  btrfs: refactor btrfs_lookup_bio_sums to handle out-of-order bvecs
  btrfs: remove btrfs_find_ordered_sum call from btrfs_lookup_bio_sums
  btrfs: handle sectorsize < PAGE_SIZE case for extent buffer accessors
  btrfs: update num_extent_pages to support subpage sized extent buffer
  btrfs: don't allow tree block to cross page boundary for subpage support
  btrfs: calculate inline extent buffer page size based on page size
  btrfs: factor out btree page submission code to a helper
  btrfs: make btrfs_verify_data_csum follow sector size
  btrfs: pass bio_offset to check_data_csum() directly
  btrfs: rename bio_offset of extent_submit_bio_start_t to dio_file_offset
  btrfs: fix lockdep warning when creating free space tree
  btrfs: skip space_cache v1 setup when not using it
  btrfs: remove free space items when disabling space cache v1
  btrfs: warn when remount will not change the free space tree
  btrfs: use superblock state to print space_cache mount option
  ...
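To make the new 'rescue' mount options above concrete, here is a minimal
usage sketch. It assumes a btrfs filesystem on /dev/sdb1 and a 5.11+
kernel; the device path and the exact sysfs file name are illustrative
assumptions, not part of this merge:

    # Mount read-only while tolerating damaged essential tree roots
    # ('ibadroots' is the short form of 'ignorebadroots'):
    mount -o ro,rescue=ignorebadroots /dev/sdb1 /mnt

    # 'rescue=all' now enables ignorebadroots + ignoredatacsums +
    # nologreplay in one option:
    mount -o ro,rescue=all /dev/sdb1 /mnt

    # The rescue modes supported by the running kernel are exported in
    # sysfs (file name assumed from the changelog; verify on your kernel):
    cat /sys/fs/btrfs/features/supported_rescue_options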
Diffstat (limited to 'fs')
-rw-r--r--fs/btrfs/Makefile3
-rw-r--r--fs/btrfs/backref.c19
-rw-r--r--fs/btrfs/block-group.c268
-rw-r--r--fs/btrfs/block-group.h2
-rw-r--r--fs/btrfs/block-rsv.c8
-rw-r--r--fs/btrfs/btrfs_inode.h23
-rw-r--r--fs/btrfs/check-integrity.c11
-rw-r--r--fs/btrfs/compression.c28
-rw-r--r--fs/btrfs/ctree.c258
-rw-r--r--fs/btrfs/ctree.h213
-rw-r--r--fs/btrfs/delayed-inode.c23
-rw-r--r--fs/btrfs/delayed-inode.h3
-rw-r--r--fs/btrfs/dev-replace.c20
-rw-r--r--fs/btrfs/dir-item.c1
-rw-r--r--fs/btrfs/discard.c46
-rw-r--r--fs/btrfs/discard.h3
-rw-r--r--fs/btrfs/disk-io.c689
-rw-r--r--fs/btrfs/disk-io.h25
-rw-r--r--fs/btrfs/export.c1
-rw-r--r--fs/btrfs/extent-io-tree.h71
-rw-r--r--fs/btrfs/extent-tree.c111
-rw-r--r--fs/btrfs/extent_io.c656
-rw-r--r--fs/btrfs/extent_io.h50
-rw-r--r--fs/btrfs/file-item.c344
-rw-r--r--fs/btrfs/file.c737
-rw-r--r--fs/btrfs/free-space-cache.c558
-rw-r--r--fs/btrfs/free-space-cache.h22
-rw-r--r--fs/btrfs/free-space-tree.c26
-rw-r--r--fs/btrfs/inode-item.c6
-rw-r--r--fs/btrfs/inode-map.c582
-rw-r--r--fs/btrfs/inode-map.h16
-rw-r--r--fs/btrfs/inode.c815
-rw-r--r--fs/btrfs/ioctl.c64
-rw-r--r--fs/btrfs/locking.c459
-rw-r--r--fs/btrfs/locking.h24
-rw-r--r--fs/btrfs/ordered-data.c45
-rw-r--r--fs/btrfs/ordered-data.h5
-rw-r--r--fs/btrfs/print-tree.c15
-rw-r--r--fs/btrfs/qgroup.c52
-rw-r--r--fs/btrfs/raid56.c8
-rw-r--r--fs/btrfs/reada.c34
-rw-r--r--fs/btrfs/ref-verify.c27
-rw-r--r--fs/btrfs/reflink.c18
-rw-r--r--fs/btrfs/relocation.c116
-rw-r--r--fs/btrfs/scrub.c340
-rw-r--r--fs/btrfs/send.c6
-rw-r--r--fs/btrfs/struct-funcs.c18
-rw-r--r--fs/btrfs/super.c179
-rw-r--r--fs/btrfs/sysfs.c117
-rw-r--r--fs/btrfs/tests/btrfs-tests.c3
-rw-r--r--fs/btrfs/tests/extent-io-tests.c26
-rw-r--r--fs/btrfs/tests/free-space-tests.c1
-rw-r--r--fs/btrfs/tests/qgroup-tests.c4
-rw-r--r--fs/btrfs/transaction.c126
-rw-r--r--fs/btrfs/transaction.h3
-rw-r--r--fs/btrfs/tree-checker.c337
-rw-r--r--fs/btrfs/tree-defrag.c1
-rw-r--r--fs/btrfs/tree-log.c183
-rw-r--r--fs/btrfs/uuid-tree.c3
-rw-r--r--fs/btrfs/volumes.c143
-rw-r--r--fs/btrfs/volumes.h21
-rw-r--r--fs/btrfs/xattr.c8
-rw-r--r--fs/btrfs/zoned.c616
-rw-r--r--fs/btrfs/zoned.h160
64 files changed, 4488 insertions, 4312 deletions
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index e738f6206ea5..9f1b1a88e317 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -3,7 +3,7 @@
obj-$(CONFIG_BTRFS_FS) := btrfs.o
btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
- file-item.o inode-item.o inode-map.o disk-io.o \
+ file-item.o inode-item.o disk-io.o \
transaction.o inode.o file.o tree-defrag.o \
extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
@@ -16,6 +16,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
btrfs-$(CONFIG_BTRFS_FS_REF_VERIFY) += ref-verify.o
+btrfs-$(CONFIG_BLK_DEV_ZONED) += zoned.o
btrfs-$(CONFIG_BTRFS_FS_RUN_SANITY_TESTS) += tests/free-space-tests.o \
tests/extent-buffer-tests.o tests/btrfs-tests.o \
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 771a036867dc..02d7d7b2563b 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -783,8 +783,8 @@ static int add_missing_keys(struct btrfs_fs_info *fs_info,
BUG_ON(ref->key_for_search.type);
BUG_ON(!ref->wanted_disk_byte);
- eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
- ref->level - 1, NULL);
+ eb = read_tree_block(fs_info, ref->wanted_disk_byte,
+ ref->root_id, 0, ref->level - 1, NULL);
if (IS_ERR(eb)) {
free_pref(ref);
return PTR_ERR(eb);
@@ -1331,7 +1331,7 @@ again:
struct extent_buffer *eb;
eb = read_tree_block(fs_info, ref->parent, 0,
- ref->level, NULL);
+ 0, ref->level, NULL);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
goto out;
@@ -1341,14 +1341,12 @@ again:
goto out;
}
- if (!path->skip_locking) {
+ if (!path->skip_locking)
btrfs_tree_read_lock(eb);
- btrfs_set_lock_blocking_read(eb);
- }
ret = find_extent_in_eb(eb, bytenr,
*extent_item_pos, &eie, ignore_offset);
if (!path->skip_locking)
- btrfs_tree_read_unlock_blocking(eb);
+ btrfs_tree_read_unlock(eb);
free_extent_buffer(eb);
if (ret < 0)
goto out;
@@ -1671,13 +1669,11 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
s64 bytes_left = ((s64)size) - 1;
struct extent_buffer *eb = eb_in;
struct btrfs_key found_key;
- int leave_spinning = path->leave_spinning;
struct btrfs_inode_ref *iref;
if (bytes_left >= 0)
dest[bytes_left] = '\0';
- path->leave_spinning = 1;
while (1) {
bytes_left -= name_len;
if (bytes_left >= 0)
@@ -1685,7 +1681,7 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
name_off, name_len);
if (eb != eb_in) {
if (!path->skip_locking)
- btrfs_tree_read_unlock_blocking(eb);
+ btrfs_tree_read_unlock(eb);
free_extent_buffer(eb);
}
ret = btrfs_find_item(fs_root, path, parent, 0,
@@ -1705,8 +1701,6 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
eb = path->nodes[0];
/* make sure we can use eb after releasing the path */
if (eb != eb_in) {
- if (!path->skip_locking)
- btrfs_set_lock_blocking_read(eb);
path->nodes[0] = NULL;
path->locks[0] = 0;
}
@@ -1723,7 +1717,6 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
}
btrfs_release_path(path);
- path->leave_spinning = leave_spinning;
if (ret)
return ERR_PTR(ret);
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 3ba6f3839d39..52f2198d44c9 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -424,6 +424,23 @@ int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
return ret;
}
+static bool space_cache_v1_done(struct btrfs_block_group *cache)
+{
+ bool ret;
+
+ spin_lock(&cache->lock);
+ ret = cache->cached != BTRFS_CACHE_FAST;
+ spin_unlock(&cache->lock);
+
+ return ret;
+}
+
+void btrfs_wait_space_cache_v1_finished(struct btrfs_block_group *cache,
+ struct btrfs_caching_control *caching_ctl)
+{
+ wait_event(caching_ctl->wait, space_cache_v1_done(cache));
+}
+
#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_block_group *block_group)
{
@@ -639,11 +656,28 @@ static noinline void caching_thread(struct btrfs_work *work)
mutex_lock(&caching_ctl->mutex);
down_read(&fs_info->commit_root_sem);
+ if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
+ ret = load_free_space_cache(block_group);
+ if (ret == 1) {
+ ret = 0;
+ goto done;
+ }
+
+ /*
+ * We failed to load the space cache, set ourselves to
+ * CACHE_STARTED and carry on.
+ */
+ spin_lock(&block_group->lock);
+ block_group->cached = BTRFS_CACHE_STARTED;
+ spin_unlock(&block_group->lock);
+ wake_up(&caching_ctl->wait);
+ }
+
if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
ret = load_free_space_tree(caching_ctl);
else
ret = load_extent_tree_free(caching_ctl);
-
+done:
spin_lock(&block_group->lock);
block_group->caching_ctl = NULL;
block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
@@ -679,7 +713,7 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only
{
DEFINE_WAIT(wait);
struct btrfs_fs_info *fs_info = cache->fs_info;
- struct btrfs_caching_control *caching_ctl;
+ struct btrfs_caching_control *caching_ctl = NULL;
int ret = 0;
caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
@@ -691,119 +725,41 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only
init_waitqueue_head(&caching_ctl->wait);
caching_ctl->block_group = cache;
caching_ctl->progress = cache->start;
- refcount_set(&caching_ctl->count, 1);
+ refcount_set(&caching_ctl->count, 2);
btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
spin_lock(&cache->lock);
- /*
- * This should be a rare occasion, but this could happen I think in the
- * case where one thread starts to load the space cache info, and then
- * some other thread starts a transaction commit which tries to do an
- * allocation while the other thread is still loading the space cache
- * info. The previous loop should have kept us from choosing this block
- * group, but if we've moved to the state where we will wait on caching
- * block groups we need to first check if we're doing a fast load here,
- * so we can wait for it to finish, otherwise we could end up allocating
- * from a block group who's cache gets evicted for one reason or
- * another.
- */
- while (cache->cached == BTRFS_CACHE_FAST) {
- struct btrfs_caching_control *ctl;
-
- ctl = cache->caching_ctl;
- refcount_inc(&ctl->count);
- prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock(&cache->lock);
-
- schedule();
-
- finish_wait(&ctl->wait, &wait);
- btrfs_put_caching_control(ctl);
- spin_lock(&cache->lock);
- }
-
if (cache->cached != BTRFS_CACHE_NO) {
- spin_unlock(&cache->lock);
kfree(caching_ctl);
- return 0;
+
+ caching_ctl = cache->caching_ctl;
+ if (caching_ctl)
+ refcount_inc(&caching_ctl->count);
+ spin_unlock(&cache->lock);
+ goto out;
}
WARN_ON(cache->caching_ctl);
cache->caching_ctl = caching_ctl;
- cache->cached = BTRFS_CACHE_FAST;
+ if (btrfs_test_opt(fs_info, SPACE_CACHE))
+ cache->cached = BTRFS_CACHE_FAST;
+ else
+ cache->cached = BTRFS_CACHE_STARTED;
+ cache->has_caching_ctl = 1;
spin_unlock(&cache->lock);
- if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
- mutex_lock(&caching_ctl->mutex);
- ret = load_free_space_cache(cache);
-
- spin_lock(&cache->lock);
- if (ret == 1) {
- cache->caching_ctl = NULL;
- cache->cached = BTRFS_CACHE_FINISHED;
- cache->last_byte_to_unpin = (u64)-1;
- caching_ctl->progress = (u64)-1;
- } else {
- if (load_cache_only) {
- cache->caching_ctl = NULL;
- cache->cached = BTRFS_CACHE_NO;
- } else {
- cache->cached = BTRFS_CACHE_STARTED;
- cache->has_caching_ctl = 1;
- }
- }
- spin_unlock(&cache->lock);
-#ifdef CONFIG_BTRFS_DEBUG
- if (ret == 1 &&
- btrfs_should_fragment_free_space(cache)) {
- u64 bytes_used;
-
- spin_lock(&cache->space_info->lock);
- spin_lock(&cache->lock);
- bytes_used = cache->length - cache->used;
- cache->space_info->bytes_used += bytes_used >> 1;
- spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
- fragment_free_space(cache);
- }
-#endif
- mutex_unlock(&caching_ctl->mutex);
-
- wake_up(&caching_ctl->wait);
- if (ret == 1) {
- btrfs_put_caching_control(caching_ctl);
- btrfs_free_excluded_extents(cache);
- return 0;
- }
- } else {
- /*
- * We're either using the free space tree or no caching at all.
- * Set cached to the appropriate value and wakeup any waiters.
- */
- spin_lock(&cache->lock);
- if (load_cache_only) {
- cache->caching_ctl = NULL;
- cache->cached = BTRFS_CACHE_NO;
- } else {
- cache->cached = BTRFS_CACHE_STARTED;
- cache->has_caching_ctl = 1;
- }
- spin_unlock(&cache->lock);
- wake_up(&caching_ctl->wait);
- }
-
- if (load_cache_only) {
- btrfs_put_caching_control(caching_ctl);
- return 0;
- }
-
- down_write(&fs_info->commit_root_sem);
+ spin_lock(&fs_info->block_group_cache_lock);
refcount_inc(&caching_ctl->count);
list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
- up_write(&fs_info->commit_root_sem);
+ spin_unlock(&fs_info->block_group_cache_lock);
btrfs_get_block_group(cache);
btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
+out:
+ if (load_cache_only && caching_ctl)
+ btrfs_wait_space_cache_v1_finished(cache, caching_ctl);
+ if (caching_ctl)
+ btrfs_put_caching_control(caching_ctl);
return ret;
}
@@ -892,8 +848,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
struct btrfs_path *path;
struct btrfs_block_group *block_group;
struct btrfs_free_cluster *cluster;
- struct btrfs_root *tree_root = fs_info->tree_root;
- struct btrfs_key key;
struct inode *inode;
struct kobject *kobj = NULL;
int ret;
@@ -971,42 +925,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
spin_unlock(&trans->transaction->dirty_bgs_lock);
mutex_unlock(&trans->transaction->cache_write_mutex);
- if (!IS_ERR(inode)) {
- ret = btrfs_orphan_add(trans, BTRFS_I(inode));
- if (ret) {
- btrfs_add_delayed_iput(inode);
- goto out;
- }
- clear_nlink(inode);
- /* One for the block groups ref */
- spin_lock(&block_group->lock);
- if (block_group->iref) {
- block_group->iref = 0;
- block_group->inode = NULL;
- spin_unlock(&block_group->lock);
- iput(inode);
- } else {
- spin_unlock(&block_group->lock);
- }
- /* One for our lookup ref */
- btrfs_add_delayed_iput(inode);
- }
-
- key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.type = 0;
- key.offset = block_group->start;
-
- ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
- if (ret < 0)
+ ret = btrfs_remove_free_space_inode(trans, inode, block_group);
+ if (ret)
goto out;
- if (ret > 0)
- btrfs_release_path(path);
- if (ret == 0) {
- ret = btrfs_del_item(trans, tree_root, path);
- if (ret)
- goto out;
- btrfs_release_path(path);
- }
spin_lock(&fs_info->block_group_cache_lock);
rb_erase(&block_group->cache_node,
@@ -1043,7 +964,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
if (block_group->cached == BTRFS_CACHE_STARTED)
btrfs_wait_block_group_cache_done(block_group);
if (block_group->has_caching_ctl) {
- down_write(&fs_info->commit_root_sem);
+ spin_lock(&fs_info->block_group_cache_lock);
if (!caching_ctl) {
struct btrfs_caching_control *ctl;
@@ -1057,7 +978,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
}
if (caching_ctl)
list_del_init(&caching_ctl->list);
- up_write(&fs_info->commit_root_sem);
+ spin_unlock(&fs_info->block_group_cache_lock);
if (caching_ctl) {
/* Once for the caching bgs list and once for us. */
btrfs_put_caching_control(caching_ctl);
@@ -1723,6 +1644,7 @@ out:
static int exclude_super_stripes(struct btrfs_block_group *cache)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
+ const bool zoned = btrfs_is_zoned(fs_info);
u64 bytenr;
u64 *logical;
int stripe_len;
@@ -1744,6 +1666,14 @@ static int exclude_super_stripes(struct btrfs_block_group *cache)
if (ret)
return ret;
+ /* Shouldn't have super stripes in sequential zones */
+ if (zoned && nr) {
+ btrfs_err(fs_info,
+ "zoned: block group %llu must not contain super block",
+ cache->start);
+ return -EUCLEAN;
+ }
+
while (nr--) {
u64 len = min_t(u64, stripe_len,
cache->start + cache->length - logical[nr]);
@@ -1805,7 +1735,7 @@ static struct btrfs_block_group *btrfs_create_block_group_cache(
INIT_LIST_HEAD(&cache->discard_list);
INIT_LIST_HEAD(&cache->dirty_list);
INIT_LIST_HEAD(&cache->io_list);
- btrfs_init_free_space_ctl(cache);
+ btrfs_init_free_space_ctl(cache, cache->free_space_ctl);
atomic_set(&cache->frozen, 0);
mutex_init(&cache->free_space_lock);
btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
@@ -1985,6 +1915,51 @@ error:
return ret;
}
+static int fill_dummy_bgs(struct btrfs_fs_info *fs_info)
+{
+ struct extent_map_tree *em_tree = &fs_info->mapping_tree;
+ struct btrfs_space_info *space_info;
+ struct rb_node *node;
+ int ret = 0;
+
+ for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
+ struct extent_map *em;
+ struct map_lookup *map;
+ struct btrfs_block_group *bg;
+
+ em = rb_entry(node, struct extent_map, rb_node);
+ map = em->map_lookup;
+ bg = btrfs_create_block_group_cache(fs_info, em->start);
+ if (!bg) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ /* Fill dummy cache as FULL */
+ bg->length = em->len;
+ bg->flags = map->type;
+ bg->last_byte_to_unpin = (u64)-1;
+ bg->cached = BTRFS_CACHE_FINISHED;
+ bg->used = em->len;
+ bg->flags = map->type;
+ ret = btrfs_add_block_group_cache(fs_info, bg);
+ if (ret) {
+ btrfs_remove_free_space_cache(bg);
+ btrfs_put_block_group(bg);
+ break;
+ }
+ btrfs_update_space_info(fs_info, bg->flags, em->len, em->len,
+ 0, &space_info);
+ bg->space_info = space_info;
+ link_block_group(bg);
+
+ set_avail_alloc_bits(fs_info, bg->flags);
+ }
+ if (!ret)
+ btrfs_init_global_block_rsv(fs_info);
+ return ret;
+}
+
int btrfs_read_block_groups(struct btrfs_fs_info *info)
{
struct btrfs_path *path;
@@ -1995,6 +1970,9 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
int need_clear = 0;
u64 cache_gen;
+ if (!info->extent_root)
+ return fill_dummy_bgs(info);
+
key.objectid = 0;
key.offset = 0;
key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
@@ -2152,7 +2130,8 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
cache->flags = type;
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
- cache->needs_free_space = 1;
+ if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
+ cache->needs_free_space = 1;
ret = exclude_super_stripes(cache);
if (ret) {
/* We may have excluded something, so call this just in case */
@@ -2361,6 +2340,9 @@ static int cache_save_setup(struct btrfs_block_group *block_group,
int retries = 0;
int ret = 0;
+ if (!btrfs_test_opt(fs_info, SPACE_CACHE))
+ return 0;
+
/*
* If this block group is smaller than 100 megs don't bother caching the
* block group.
@@ -2401,7 +2383,7 @@ again:
* time.
*/
BTRFS_I(inode)->generation = 0;
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
if (ret) {
/*
* So theoretically we could recover from this, simply set the
@@ -3307,14 +3289,14 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
struct btrfs_caching_control *caching_ctl;
struct rb_node *n;
- down_write(&info->commit_root_sem);
+ spin_lock(&info->block_group_cache_lock);
while (!list_empty(&info->caching_block_groups)) {
caching_ctl = list_entry(info->caching_block_groups.next,
struct btrfs_caching_control, list);
list_del(&caching_ctl->list);
btrfs_put_caching_control(caching_ctl);
}
- up_write(&info->commit_root_sem);
+ spin_unlock(&info->block_group_cache_lock);
spin_lock(&info->unused_bgs_lock);
while (!list_empty(&info->unused_bgs)) {
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index adfd7583a17b..8f74a96074f7 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -268,6 +268,8 @@ void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
+void btrfs_wait_space_cache_v1_finished(struct btrfs_block_group *cache,
+ struct btrfs_caching_control *caching_ctl);
static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
index bc920afe23bf..04a6226e0388 100644
--- a/fs/btrfs/block-rsv.c
+++ b/fs/btrfs/block-rsv.c
@@ -426,6 +426,14 @@ void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info)
fs_info->delayed_block_rsv.space_info = space_info;
fs_info->delayed_refs_rsv.space_info = space_info;
+ /*
+ * Our various recovery options can leave us with NULL roots, so check
+ * here and just bail before we go dereferencing NULLs everywhere.
+ */
+ if (!fs_info->extent_root || !fs_info->csum_root ||
+ !fs_info->dev_root || !fs_info->chunk_root || !fs_info->tree_root)
+ return;
+
fs_info->extent_root->block_rsv = &fs_info->delayed_refs_rsv;
fs_info->csum_root->block_rsv = &fs_info->delayed_refs_rsv;
fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 92dd86bceae3..555cbcef6585 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -35,6 +35,13 @@ enum {
BTRFS_INODE_IN_DELALLOC_LIST,
BTRFS_INODE_HAS_PROPS,
BTRFS_INODE_SNAPSHOT_FLUSH,
+ /*
+ * Set and used when logging an inode and it serves to signal that an
+ * inode does not have xattrs, so subsequent fsyncs can avoid searching
+ * for xattrs to log. This bit must be cleared whenever a xattr is added
+ * to an inode.
+ */
+ BTRFS_INODE_NO_XATTRS,
};
/* in memory btrfs inode */
@@ -50,7 +57,8 @@ struct btrfs_inode {
/*
* Lock for counters and all fields used to determine if the inode is in
* the log or not (last_trans, last_sub_trans, last_log_commit,
- * logged_trans).
+ * logged_trans), to access/update new_delalloc_bytes and to update the
+ * VFS' inode number of bytes used.
*/
spinlock_t lock;
@@ -203,16 +211,6 @@ struct btrfs_inode {
/* Hook into fs_info->delayed_iputs */
struct list_head delayed_iput;
- /*
- * To avoid races between lockless (i_mutex not held) direct IO writes
- * and concurrent fsync requests. Direct IO writes must acquire read
- * access on this semaphore for creating an extent map and its
- * corresponding ordered extent. The fast fsync path must acquire write
- * access on this semaphore before it collects ordered extents and
- * extent maps.
- */
- struct rw_semaphore dio_sem;
-
struct inode vfs_inode;
};
@@ -341,8 +339,7 @@ static inline void btrfs_print_data_csum_error(struct btrfs_inode *inode,
u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
struct btrfs_root *root = inode->root;
- struct btrfs_super_block *sb = root->fs_info->super_copy;
- const u16 csum_size = btrfs_super_csum_size(sb);
+ const u32 csum_size = root->fs_info->csum_size;
/* Output minus objectid, which is more meaningful */
if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID)
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 81a8c87a5afb..6ff44e53814c 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -233,7 +233,6 @@ struct btrfsic_stack_frame {
struct btrfsic_state {
u32 print_mask;
int include_extent_data;
- int csum_size;
struct list_head all_blocks_list;
struct btrfsic_block_hashtable block_hashtable;
struct btrfsic_block_link_hashtable block_link_hashtable;
@@ -660,8 +659,6 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
return -1;
}
- state->csum_size = btrfs_super_csum_size(selected_super);
-
for (pass = 0; pass < 3; pass++) {
int num_copies;
int mirror_num;
@@ -954,7 +951,7 @@ static noinline_for_stack int btrfsic_process_metablock(
sf->prev = NULL;
continue_with_new_stack_frame:
- sf->block->generation = le64_to_cpu(sf->hdr->generation);
+ sf->block->generation = btrfs_stack_header_generation(sf->hdr);
if (0 == sf->hdr->level) {
struct btrfs_leaf *const leafhdr =
(struct btrfs_leaf *)sf->hdr;
@@ -1723,7 +1720,7 @@ static noinline_for_stack int btrfsic_test_for_metadata(
crypto_shash_update(shash, data, sublen);
}
crypto_shash_final(shash, csum);
- if (memcmp(csum, h->csum, state->csum_size))
+ if (memcmp(csum, h->csum, fs_info->csum_size))
return 1;
return 0; /* is metadata */
@@ -2695,8 +2692,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_disk=%p)\n",
bio_op(bio), bio->bi_opf, segs,
- (unsigned long long)bio->bi_iter.bi_sector,
- dev_bytenr, bio->bi_disk);
+ bio->bi_iter.bi_sector, dev_bytenr, bio->bi_disk);
mapped_datav = kmalloc_array(segs,
sizeof(*mapped_datav), GFP_NOFS);
@@ -2797,7 +2793,6 @@ int btrfsic_mount(struct btrfs_fs_info *fs_info,
state->fs_info = fs_info;
state->print_mask = print_mask;
state->include_extent_data = including_extent_data;
- state->csum_size = 0;
state->metablock_size = fs_info->nodesize;
state->datablock_size = fs_info->sectorsize;
INIT_LIST_HEAD(&state->all_blocks_list);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index eeface30facd..5ae3fa0386b7 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -131,10 +131,8 @@ static int btrfs_decompress_bio(struct compressed_bio *cb);
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
unsigned long disk_size)
{
- u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
-
return sizeof(struct compressed_bio) +
- (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
+ (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * fs_info->csum_size;
}
static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
@@ -142,7 +140,7 @@ static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
- const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
+ const u32 csum_size = fs_info->csum_size;
struct page *page;
unsigned long i;
char *kaddr;
@@ -150,7 +148,7 @@ static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
struct compressed_bio *cb = bio->bi_private;
u8 *cb_sum = cb->sums;
- if (inode->flags & BTRFS_INODE_NODATASUM)
+ if (!fs_info->csum_root || (inode->flags & BTRFS_INODE_NODATASUM))
return 0;
shash->tfm = fs_info->csum_shash;
@@ -220,7 +218,7 @@ static void end_compressed_bio_read(struct bio *bio)
inode = cb->inode;
ret = check_compressed_csum(BTRFS_I(inode), bio,
- (u64)bio->bi_iter.bi_sector << 9);
+ bio->bi_iter.bi_sector << 9);
if (ret)
goto csum_failed;
@@ -622,13 +620,12 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
unsigned long pg_index;
struct page *page;
struct bio *comp_bio;
- u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
+ u64 cur_disk_byte = bio->bi_iter.bi_sector << 9;
u64 em_len;
u64 em_start;
struct extent_map *em;
blk_status_t ret = BLK_STS_RESOURCE;
int faili = 0;
- const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
u8 *sums;
em_tree = &BTRFS_I(inode)->extent_tree;
@@ -722,15 +719,12 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
*/
refcount_inc(&cb->pending_bios);
- if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
- ret = btrfs_lookup_bio_sums(inode, comp_bio,
- (u64)-1, sums);
- BUG_ON(ret); /* -ENOMEM */
- }
+ ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
+ BUG_ON(ret); /* -ENOMEM */
nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
fs_info->sectorsize);
- sums += csum_size * nr_sectors;
+ sums += fs_info->csum_size * nr_sectors;
ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
if (ret) {
@@ -751,10 +745,8 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
- if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
- ret = btrfs_lookup_bio_sums(inode, comp_bio, (u64)-1, sums);
- BUG_ON(ret); /* -ENOMEM */
- }
+ ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
+ BUG_ON(ret); /* -ENOMEM */
ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
if (ret) {
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 113da62dc17f..07810891e204 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1278,14 +1278,11 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
if (!tm)
return eb;
- btrfs_set_path_blocking(path);
- btrfs_set_lock_blocking_read(eb);
-
if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
BUG_ON(tm->slot != 0);
eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
if (!eb_rewin) {
- btrfs_tree_read_unlock_blocking(eb);
+ btrfs_tree_read_unlock(eb);
free_extent_buffer(eb);
return NULL;
}
@@ -1297,13 +1294,13 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
} else {
eb_rewin = btrfs_clone_extent_buffer(eb);
if (!eb_rewin) {
- btrfs_tree_read_unlock_blocking(eb);
+ btrfs_tree_read_unlock(eb);
free_extent_buffer(eb);
return NULL;
}
}
- btrfs_tree_read_unlock_blocking(eb);
+ btrfs_tree_read_unlock(eb);
free_extent_buffer(eb);
btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin),
@@ -1356,7 +1353,8 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
btrfs_tree_read_unlock(eb_root);
free_extent_buffer(eb_root);
- old = read_tree_block(fs_info, logical, 0, level, NULL);
+ old = read_tree_block(fs_info, logical, root->root_key.objectid,
+ 0, level, NULL);
if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
if (!IS_ERR(old))
free_extent_buffer(old);
@@ -1373,9 +1371,8 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
free_extent_buffer(eb_root);
eb = alloc_dummy_extent_buffer(fs_info, logical);
} else {
- btrfs_set_lock_blocking_read(eb_root);
eb = btrfs_clone_extent_buffer(eb_root);
- btrfs_tree_read_unlock_blocking(eb_root);
+ btrfs_tree_read_unlock(eb_root);
free_extent_buffer(eb_root);
}
@@ -1483,10 +1480,6 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
search_start = buf->start & ~((u64)SZ_1G - 1);
- if (parent)
- btrfs_set_lock_blocking_write(parent);
- btrfs_set_lock_blocking_write(buf);
-
/*
* Before CoWing this block for later modification, check if it's
* the subtree root and do the delayed subtree trace if needed.
@@ -1578,7 +1571,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *cur;
u64 blocknr;
- u64 gen;
u64 search_start = *last_ret;
u64 last_block = 0;
u64 other;
@@ -1586,14 +1578,10 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
int end_slot;
int i;
int err = 0;
- int parent_level;
- int uptodate;
u32 blocksize;
int progress_passed = 0;
struct btrfs_disk_key disk_key;
- parent_level = btrfs_header_level(parent);
-
WARN_ON(trans->transaction != fs_info->running_transaction);
WARN_ON(trans->transid != fs_info->generation);
@@ -1604,10 +1592,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
if (parent_nritems <= 1)
return 0;
- btrfs_set_lock_blocking_write(parent);
-
for (i = start_slot; i <= end_slot; i++) {
- struct btrfs_key first_key;
int close = 1;
btrfs_node_key(parent, &disk_key, i);
@@ -1616,8 +1601,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
progress_passed = 1;
blocknr = btrfs_node_blockptr(parent, i);
- gen = btrfs_node_ptr_generation(parent, i);
- btrfs_node_key_to_cpu(parent, &first_key, i);
if (last_block == 0)
last_block = blocknr;
@@ -1634,36 +1617,13 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
continue;
}
- cur = find_extent_buffer(fs_info, blocknr);
- if (cur)
- uptodate = btrfs_buffer_uptodate(cur, gen, 0);
- else
- uptodate = 0;
- if (!cur || !uptodate) {
- if (!cur) {
- cur = read_tree_block(fs_info, blocknr, gen,
- parent_level - 1,
- &first_key);
- if (IS_ERR(cur)) {
- return PTR_ERR(cur);
- } else if (!extent_buffer_uptodate(cur)) {
- free_extent_buffer(cur);
- return -EIO;
- }
- } else if (!uptodate) {
- err = btrfs_read_buffer(cur, gen,
- parent_level - 1,&first_key);
- if (err) {
- free_extent_buffer(cur);
- return err;
- }
- }
- }
+ cur = btrfs_read_node_slot(parent, i);
+ if (IS_ERR(cur))
+ return PTR_ERR(cur);
if (search_start == 0)
search_start = last_block;
btrfs_tree_lock(cur);
- btrfs_set_lock_blocking_write(cur);
err = __btrfs_cow_block(trans, root, cur, parent, i,
&cur, search_start,
min(16 * blocksize,
@@ -1723,9 +1683,10 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
oip = offset_in_page(offset);
if (oip + key_size <= PAGE_SIZE) {
- const unsigned long idx = offset >> PAGE_SHIFT;
+ const unsigned long idx = get_eb_page_index(offset);
char *kaddr = page_address(eb->pages[idx]);
+ oip = get_eb_offset_in_page(eb, offset);
tmp = (struct btrfs_disk_key *)(kaddr + oip);
} else {
read_extent_buffer(eb, &unaligned, offset, key_size);
@@ -1801,6 +1762,7 @@ struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
btrfs_node_key_to_cpu(parent, &first_key, slot);
eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
+ btrfs_header_owner(parent),
btrfs_node_ptr_generation(parent, slot),
level - 1, &first_key);
if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
@@ -1835,8 +1797,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
mid = path->nodes[level];
- WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
- path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
+ WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
WARN_ON(btrfs_header_generation(mid) != trans->transid);
orig_ptr = btrfs_node_blockptr(mid, orig_slot);
@@ -1865,7 +1826,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
}
btrfs_tree_lock(child);
- btrfs_set_lock_blocking_write(child);
ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
BTRFS_NESTING_COW);
if (ret) {
@@ -1904,7 +1864,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
if (left) {
__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
- btrfs_set_lock_blocking_write(left);
wret = btrfs_cow_block(trans, root, left,
parent, pslot - 1, &left,
BTRFS_NESTING_LEFT_COW);
@@ -1920,7 +1879,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
if (right) {
__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
- btrfs_set_lock_blocking_write(right);
wret = btrfs_cow_block(trans, root, right,
parent, pslot + 1, &right,
BTRFS_NESTING_RIGHT_COW);
@@ -2084,7 +2042,6 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
u32 left_nr;
__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
- btrfs_set_lock_blocking_write(left);
left_nr = btrfs_header_nritems(left);
if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
@@ -2139,7 +2096,6 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
u32 right_nr;
__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
- btrfs_set_lock_blocking_write(right);
right_nr = btrfs_header_nritems(right);
if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
@@ -2243,7 +2199,7 @@ static void reada_for_search(struct btrfs_fs_info *fs_info,
search = btrfs_node_blockptr(node, nr);
if ((search <= target && target - search <= 65536) ||
(search > target && search - target <= 65536)) {
- readahead_tree_block(fs_info, search);
+ btrfs_readahead_node_child(node, nr);
nread += blocksize;
}
nscan++;
@@ -2252,16 +2208,11 @@ static void reada_for_search(struct btrfs_fs_info *fs_info,
}
}
-static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path, int level)
+static noinline void reada_for_balance(struct btrfs_path *path, int level)
{
+ struct extent_buffer *parent;
int slot;
int nritems;
- struct extent_buffer *parent;
- struct extent_buffer *eb;
- u64 gen;
- u64 block1 = 0;
- u64 block2 = 0;
parent = path->nodes[level + 1];
if (!parent)
@@ -2270,32 +2221,10 @@ static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
nritems = btrfs_header_nritems(parent);
slot = path->slots[level + 1];
- if (slot > 0) {
- block1 = btrfs_node_blockptr(parent, slot - 1);
- gen = btrfs_node_ptr_generation(parent, slot - 1);
- eb = find_extent_buffer(fs_info, block1);
- /*
- * if we get -eagain from btrfs_buffer_uptodate, we
- * don't want to return eagain here. That will loop
- * forever
- */
- if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
- block1 = 0;
- free_extent_buffer(eb);
- }
- if (slot + 1 < nritems) {
- block2 = btrfs_node_blockptr(parent, slot + 1);
- gen = btrfs_node_ptr_generation(parent, slot + 1);
- eb = find_extent_buffer(fs_info, block2);
- if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
- block2 = 0;
- free_extent_buffer(eb);
- }
-
- if (block1)
- readahead_tree_block(fs_info, block1);
- if (block2)
- readahead_tree_block(fs_info, block2);
+ if (slot > 0)
+ btrfs_readahead_node_child(parent, slot - 1);
+ if (slot + 1 < nritems)
+ btrfs_readahead_node_child(parent, slot + 1);
}
@@ -2399,14 +2328,6 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
return 0;
}
- /* the pages were up to date, but we failed
- * the generation number check. Do a full
- * read for the generation number that is correct.
- * We must do this without dropping locks so
- * we can trust our generation number
- */
- btrfs_set_path_blocking(p);
-
/* now we're allowed to do a blocking uptodate check */
ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
if (!ret) {
@@ -2426,14 +2347,13 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
* out which blocks to read.
*/
btrfs_unlock_up_safe(p, level + 1);
- btrfs_set_path_blocking(p);
if (p->reada != READA_NONE)
reada_for_search(fs_info, p, level, slot, key->objectid);
ret = -EAGAIN;
- tmp = read_tree_block(fs_info, blocknr, gen, parent_level - 1,
- &first_key);
+ tmp = read_tree_block(fs_info, blocknr, root->root_key.objectid,
+ gen, parent_level - 1, &first_key);
if (!IS_ERR(tmp)) {
/*
* If the read above didn't mark this buffer up to date,
@@ -2468,58 +2388,42 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
int *write_lock_level)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- int ret;
+ int ret = 0;
if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
- int sret;
if (*write_lock_level < level + 1) {
*write_lock_level = level + 1;
btrfs_release_path(p);
- goto again;
+ return -EAGAIN;
}
- btrfs_set_path_blocking(p);
- reada_for_balance(fs_info, p, level);
- sret = split_node(trans, root, p, level);
+ reada_for_balance(p, level);
+ ret = split_node(trans, root, p, level);
- BUG_ON(sret > 0);
- if (sret) {
- ret = sret;
- goto done;
- }
b = p->nodes[level];
} else if (ins_len < 0 && btrfs_header_nritems(b) <
BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
- int sret;
if (*write_lock_level < level + 1) {
*write_lock_level = level + 1;
btrfs_release_path(p);
- goto again;
+ return -EAGAIN;
}
- btrfs_set_path_blocking(p);
- reada_for_balance(fs_info, p, level);
- sret = balance_level(trans, root, p, level);
+ reada_for_balance(p, level);
+ ret = balance_level(trans, root, p, level);
+ if (ret)
+ return ret;
- if (sret) {
- ret = sret;
- goto done;
- }
b = p->nodes[level];
if (!b) {
btrfs_release_path(p);
- goto again;
+ return -EAGAIN;
}
BUG_ON(btrfs_header_nritems(b) == 1);
}
- return 0;
-
-again:
- ret = -EAGAIN;
-done:
return ret;
}
@@ -2616,7 +2520,7 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
* We don't know the level of the root node until we actually
* have it read locked
*/
- b = __btrfs_read_lock_root_node(root, p->recurse);
+ b = btrfs_read_lock_root_node(root);
level = btrfs_header_level(b);
if (level > write_lock_level)
goto out;
@@ -2752,7 +2656,6 @@ again:
goto again;
}
- btrfs_set_path_blocking(p);
if (last_level)
err = btrfs_cow_block(trans, root, b, NULL, 0,
&b,
@@ -2822,7 +2725,6 @@ cow_done:
goto again;
}
- btrfs_set_path_blocking(p);
err = split_leaf(trans, root, key,
p, ins_len, ret == 0);
@@ -2884,17 +2786,10 @@ cow_done:
if (!p->skip_locking) {
level = btrfs_header_level(b);
if (level <= write_lock_level) {
- if (!btrfs_try_tree_write_lock(b)) {
- btrfs_set_path_blocking(p);
- btrfs_tree_lock(b);
- }
+ btrfs_tree_lock(b);
p->locks[level] = BTRFS_WRITE_LOCK;
} else {
- if (!btrfs_tree_read_lock_atomic(b)) {
- btrfs_set_path_blocking(p);
- __btrfs_tree_read_lock(b, BTRFS_NESTING_NORMAL,
- p->recurse);
- }
+ btrfs_tree_read_lock(b);
p->locks[level] = BTRFS_READ_LOCK;
}
p->nodes[level] = b;
@@ -2902,12 +2797,6 @@ cow_done:
}
ret = 1;
done:
- /*
- * we don't really know what they plan on doing with the path
- * from here on, so for now just mark it as blocking
- */
- if (!p->leave_spinning)
- btrfs_set_path_blocking(p);
if (ret < 0 && !p->skip_release_on_error)
btrfs_release_path(p);
return ret;
@@ -2999,10 +2888,7 @@ again:
}
level = btrfs_header_level(b);
- if (!btrfs_tree_read_lock_atomic(b)) {
- btrfs_set_path_blocking(p);
- btrfs_tree_read_lock(b);
- }
+ btrfs_tree_read_lock(b);
b = tree_mod_log_rewind(fs_info, p, b, time_seq);
if (!b) {
ret = -ENOMEM;
@@ -3013,8 +2899,6 @@ again:
}
ret = 1;
done:
- if (!p->leave_spinning)
- btrfs_set_path_blocking(p);
if (ret < 0)
btrfs_release_path(p);
@@ -3441,7 +3325,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
add_root_to_dirty_list(root);
atomic_inc(&c->refs);
path->nodes[level] = c;
- path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+ path->locks[level] = BTRFS_WRITE_LOCK;
path->slots[level] = 0;
return 0;
}
@@ -3562,7 +3446,6 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
(c_nritems - mid) * sizeof(struct btrfs_key_ptr));
btrfs_set_header_nritems(split, c_nritems - mid);
btrfs_set_header_nritems(c, mid);
- ret = 0;
btrfs_mark_buffer_dirty(c);
btrfs_mark_buffer_dirty(split);
@@ -3580,7 +3463,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(split);
free_extent_buffer(split);
}
- return ret;
+ return 0;
}
/*
@@ -3814,7 +3697,6 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
return 1;
__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
- btrfs_set_lock_blocking_write(right);
free_space = btrfs_leaf_free_space(right);
if (free_space < data_size)
@@ -4053,7 +3935,6 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
return 1;
__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
- btrfs_set_lock_blocking_write(left);
free_space = btrfs_leaf_free_space(left);
if (free_space < data_size) {
@@ -4448,7 +4329,6 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
goto err;
}
- btrfs_set_path_blocking(path);
ret = split_leaf(trans, root, &key, path, ins_len, 1);
if (ret)
goto err;
@@ -4478,8 +4358,6 @@ static noinline int split_item(struct btrfs_path *path,
leaf = path->nodes[0];
BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item));
- btrfs_set_path_blocking(path);
-
item = btrfs_item_nr(path->slots[0]);
orig_offset = btrfs_item_offset(leaf, item);
item_size = btrfs_item_size(leaf, item);
@@ -5055,7 +4933,6 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
if (leaf == root->node) {
btrfs_set_header_level(leaf, 0);
} else {
- btrfs_set_path_blocking(path);
btrfs_clean_tree_block(leaf);
btrfs_del_leaf(trans, root, path, leaf);
}
@@ -5077,7 +4954,6 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
slot = path->slots[1];
atomic_inc(&leaf->refs);
- btrfs_set_path_blocking(path);
wret = push_leaf_left(trans, root, path, 1, 1,
1, (u32)-1);
if (wret < 0 && wret != -ENOSPC)
@@ -5248,7 +5124,6 @@ find_next_key:
*/
if (slot >= nritems) {
path->slots[level] = slot;
- btrfs_set_path_blocking(path);
sret = btrfs_find_next_key(root, path, min_key, level,
min_trans);
if (sret == 0) {
@@ -5265,7 +5140,6 @@ find_next_key:
ret = 0;
goto out;
}
- btrfs_set_path_blocking(path);
cur = btrfs_read_node_slot(cur, slot);
if (IS_ERR(cur)) {
ret = PTR_ERR(cur);
@@ -5282,7 +5156,6 @@ out:
path->keep_locks = keep_locks;
if (ret == 0) {
btrfs_unlock_up_safe(path, path->lowest_level + 1);
- btrfs_set_path_blocking(path);
memcpy(min_key, &found_key, sizeof(found_key));
}
return ret;
@@ -5384,8 +5257,7 @@ int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_key key;
u32 nritems;
int ret;
- int old_spinning = path->leave_spinning;
- int next_rw_lock = 0;
+ int i;
nritems = btrfs_header_nritems(path->nodes[0]);
if (nritems == 0)
@@ -5395,11 +5267,9 @@ int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
again:
level = 1;
next = NULL;
- next_rw_lock = 0;
btrfs_release_path(path);
path->keep_locks = 1;
- path->leave_spinning = 1;
if (time_seq)
ret = btrfs_search_old_slot(root, &key, path, time_seq);
@@ -5459,13 +5329,22 @@ again:
continue;
}
- if (next) {
- btrfs_tree_unlock_rw(next, next_rw_lock);
- free_extent_buffer(next);
+
+ /*
+ * Our current level is where we're going to start from, and to
+ * make sure lockdep doesn't complain we need to drop our locks
+ * and nodes from 0 to our current level.
+ */
+ for (i = 0; i < level; i++) {
+ if (path->locks[level]) {
+ btrfs_tree_read_unlock(path->nodes[i]);
+ path->locks[i] = 0;
+ }
+ free_extent_buffer(path->nodes[i]);
+ path->nodes[i] = NULL;
}
next = c;
- next_rw_lock = path->locks[level];
ret = read_block_for_search(root, path, &next, level,
slot, &key);
if (ret == -EAGAIN)
@@ -5491,28 +5370,18 @@ again:
cond_resched();
goto again;
}
- if (!ret) {
- btrfs_set_path_blocking(path);
- __btrfs_tree_read_lock(next,
- BTRFS_NESTING_RIGHT,
- path->recurse);
- }
- next_rw_lock = BTRFS_READ_LOCK;
+ if (!ret)
+ btrfs_tree_read_lock(next);
}
break;
}
path->slots[level] = slot;
while (1) {
level--;
- c = path->nodes[level];
- if (path->locks[level])
- btrfs_tree_unlock_rw(c, path->locks[level]);
-
- free_extent_buffer(c);
path->nodes[level] = next;
path->slots[level] = 0;
if (!path->skip_locking)
- path->locks[level] = next_rw_lock;
+ path->locks[level] = BTRFS_READ_LOCK;
if (!level)
break;
@@ -5526,23 +5395,12 @@ again:
goto done;
}
- if (!path->skip_locking) {
- ret = btrfs_try_tree_read_lock(next);
- if (!ret) {
- btrfs_set_path_blocking(path);
- __btrfs_tree_read_lock(next,
- BTRFS_NESTING_RIGHT,
- path->recurse);
- }
- next_rw_lock = BTRFS_READ_LOCK;
- }
+ if (!path->skip_locking)
+ btrfs_tree_read_lock(next);
}
ret = 0;
done:
unlock_up(path, 0, 1, 0, NULL);
- path->leave_spinning = old_spinning;
- if (!old_spinning)
- btrfs_set_path_blocking(path);
return ret;
}
@@ -5564,7 +5422,6 @@ int btrfs_previous_item(struct btrfs_root *root,
while (1) {
if (path->slots[0] == 0) {
- btrfs_set_path_blocking(path);
ret = btrfs_prev_leaf(root, path);
if (ret != 0)
return ret;
@@ -5606,7 +5463,6 @@ int btrfs_previous_extent_item(struct btrfs_root *root,
while (1) {
if (path->slots[0] == 0) {
- btrfs_set_path_blocking(path);
ret = btrfs_prev_leaf(root, path);
if (ret != 0)
return ret;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index d13006588647..1d3c1e479f3d 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -27,6 +27,7 @@
#include <linux/dynamic_debug.h>
#include <linux/refcount.h>
#include <linux/crc32c.h>
+#include <linux/iomap.h>
#include "extent-io-tree.h"
#include "extent_io.h"
#include "extent_map.h"
@@ -66,12 +67,6 @@ struct btrfs_ref;
#define BTRFS_OLDEST_GENERATION 0ULL
/*
- * the max metadata block size. This limit is somewhat artificial,
- * but the memmove costs go through the roof for larger blocks.
- */
-#define BTRFS_MAX_METADATA_BLOCKSIZE 65536
-
-/*
* we can actually store much bigger names, but lets not confuse the rest
* of linux
*/
@@ -369,11 +364,9 @@ struct btrfs_path {
unsigned int search_for_split:1;
unsigned int keep_locks:1;
unsigned int skip_locking:1;
- unsigned int leave_spinning:1;
unsigned int search_commit_root:1;
unsigned int need_commit_sem:1;
unsigned int skip_release_on_error:1;
- unsigned int recurse:1;
};
#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
sizeof(struct btrfs_item))
@@ -468,10 +461,11 @@ struct btrfs_discard_ctl {
struct btrfs_block_group *block_group;
struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
u64 prev_discard;
+ u64 prev_discard_time;
atomic_t discardable_extents;
atomic64_t discardable_bytes;
u64 max_discard_size;
- unsigned long delay;
+ u64 delay_ms;
u32 iops_limit;
u32 kbps_limit;
u64 discard_extent_bytes;
@@ -558,6 +552,9 @@ enum {
/* Indicate that the discard workqueue can service discards. */
BTRFS_FS_DISCARD_RUNNING,
+
+ /* Indicate that we need to cleanup space cache v1 */
+ BTRFS_FS_CLEANUP_SPACE_CACHE_V1,
};
/*
@@ -911,6 +908,7 @@ struct btrfs_fs_info {
/* Extent buffer radix tree */
spinlock_t buffer_lock;
+ /* Entries are eb->start / sectorsize */
struct radix_tree_root buffer_radix;
/* next backup root to be overwritten */
@@ -933,6 +931,10 @@ struct btrfs_fs_info {
/* Cached block sizes */
u32 nodesize;
u32 sectorsize;
+ /* ilog2 of sectorsize, use to avoid 64bit division */
+ u32 sectorsize_bits;
+ u32 csum_size;
+ u32 csums_per_leaf;
u32 stripesize;
/* Block groups and devices containing active swapfiles. */
@@ -950,6 +952,18 @@ struct btrfs_fs_info {
/* Type of exclusive operation running */
unsigned long exclusive_operation;
+ /*
+ * Zone size > 0 when in ZONED mode, otherwise it's used for a check
+ * if the mode is enabled
+ */
+ union {
+ u64 zone_size;
+ u64 zoned;
+ };
+
+ /* Max size to emit ZONE_APPEND write command */
+ u64 max_zone_append_size;
+
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
spinlock_t ref_verify_lock;
struct rb_root block_tree;
@@ -1020,7 +1034,7 @@ enum {
BTRFS_ROOT_DEAD_RELOC_TREE,
/* Mark dead root stored on device whose cleanup needs to be resumed */
BTRFS_ROOT_DEAD_TREE,
- /* The root has a log tree. Used only for subvolume roots. */
+ /* The root has a log tree. Used for subvolume roots and the tree root. */
BTRFS_ROOT_HAS_LOG_TREE,
/* Qgroup flushing is in progress */
BTRFS_ROOT_QGROUP_FLUSHING,
@@ -1059,15 +1073,6 @@ struct btrfs_root {
spinlock_t accounting_lock;
struct btrfs_block_rsv *block_rsv;
- /* free ino cache stuff */
- struct btrfs_free_space_ctl *free_ino_ctl;
- enum btrfs_caching_type ino_cache_state;
- spinlock_t ino_cache_lock;
- wait_queue_head_t ino_cache_wait;
- struct btrfs_free_space_ctl *free_ino_pinned;
- u64 ino_cache_progress;
- struct inode *ino_cache_inode;
-
struct mutex log_mutex;
wait_queue_head_t log_writer_wait;
wait_queue_head_t log_commit_wait[2];
@@ -1226,6 +1231,63 @@ struct btrfs_replace_extent_info {
int insertions;
};
+/* Arguments for btrfs_drop_extents() */
+struct btrfs_drop_extents_args {
+ /* Input parameters */
+
+ /*
+ * If NULL, btrfs_drop_extents() will allocate and free its own path.
+ * If 'replace_extent' is true, this must not be NULL. Also the path
+ * is always released except if 'replace_extent' is true and
+ * btrfs_drop_extents() sets 'extent_inserted' to true, in which case
+ * the path is kept locked.
+ */
+ struct btrfs_path *path;
+ /* Start offset of the range to drop extents from */
+ u64 start;
+ /* End (exclusive, last byte + 1) of the range to drop extents from */
+ u64 end;
+ /* If true drop all the extent maps in the range */
+ bool drop_cache;
+ /*
+ * If true it means we want to insert a new extent after dropping all
+ * the extents in the range. If this is true, the 'extent_item_size'
+ * parameter must be set as well and the 'extent_inserted' field will
+ * be set to true by btrfs_drop_extents() if it could insert the new
+ * extent.
+ * Note: when this is set to true the path must not be NULL.
+ */
+ bool replace_extent;
+ /*
+ * Used if 'replace_extent' is true. Size of the file extent item to
+ * insert after dropping all existing extents in the range
+ */
+ u32 extent_item_size;
+
+ /* Output parameters */
+
+ /*
+ * Set to the minimum between the input parameter 'end' and the end
+ * (exclusive, last byte + 1) of the last dropped extent. This is always
+ * set even if btrfs_drop_extents() returns an error.
+ */
+ u64 drop_end;
+ /*
+ * The number of allocated bytes found in the range. This can be smaller
+ * than the range's length when there are holes in the range.
+ */
+ u64 bytes_found;
+ /*
+ * Only set if 'replace_extent' is true. Set to true if we were able
+ * to insert a replacement extent after dropping all extents in the
+ * range, otherwise set to false by btrfs_drop_extents().
+ * Also, if btrfs_drop_extents() has set this to true it means it
+ * returned with the path locked, otherwise if it has set this to
+ * false it has returned with the path released.
+ */
+ bool extent_inserted;
+};
+
struct btrfs_file_private {
void *filldir_buf;
};
@@ -1284,7 +1346,7 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15)
#define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16)
-#define BTRFS_MOUNT_INODE_MAP_CACHE (1 << 17)
+/* bit 17 is free */
#define BTRFS_MOUNT_USEBACKUPROOT (1 << 18)
#define BTRFS_MOUNT_SKIP_BALANCE (1 << 19)
#define BTRFS_MOUNT_CHECK_INTEGRITY (1 << 20)
@@ -1297,6 +1359,8 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
#define BTRFS_MOUNT_NOLOGREPLAY (1 << 27)
#define BTRFS_MOUNT_REF_VERIFY (1 << 28)
#define BTRFS_MOUNT_DISCARD_ASYNC (1 << 29)
+#define BTRFS_MOUNT_IGNOREBADROOTS (1 << 30)
+#define BTRFS_MOUNT_IGNOREDATACSUMS (1 << 31)
#define BTRFS_DEFAULT_COMMIT_INTERVAL (30)
#define BTRFS_DEFAULT_MAX_INLINE (2048)
@@ -1329,9 +1393,7 @@ do { \
* transaction commit)
*/
-#define BTRFS_PENDING_SET_INODE_MAP_CACHE (0)
-#define BTRFS_PENDING_CLEAR_INODE_MAP_CACHE (1)
-#define BTRFS_PENDING_COMMIT (2)
+#define BTRFS_PENDING_COMMIT (0)
#define btrfs_test_pending(info, opt) \
test_bit(BTRFS_PENDING_##opt, &(info)->pending_changes)
@@ -1404,7 +1466,7 @@ struct btrfs_map_token {
};
#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
- ((bytes) >> (fs_info)->sb->s_blocksize_bits)
+ ((bytes) >> (fs_info)->sectorsize_bits)
static inline void btrfs_init_map_token(struct btrfs_map_token *token,
struct extent_buffer *eb)
@@ -1489,13 +1551,14 @@ static inline void btrfs_set_token_##name(struct btrfs_map_token *token,\
#define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \
static inline u##bits btrfs_##name(const struct extent_buffer *eb) \
{ \
- const type *p = page_address(eb->pages[0]); \
+ const type *p = page_address(eb->pages[0]) + \
+ offset_in_page(eb->start); \
return get_unaligned_le##bits(&p->member); \
} \
static inline void btrfs_set_##name(const struct extent_buffer *eb, \
u##bits val) \
{ \
- type *p = page_address(eb->pages[0]); \
+ type *p = page_address(eb->pages[0]) + offset_in_page(eb->start); \
put_unaligned_le##bits(val, &p->member); \
}
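A quick worked example of why the added offset matters (numbers are illustrative): with 64K pages and a tree block at eb->start == 0x34000, offset_in_page(eb->start) == 0x4000, so the header fields are read from byte 0x4000 of the first page rather than from byte 0. With 4K pages, tree block starts are page aligned, so the added offset is always 0 and the behaviour is unchanged.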
@@ -2085,6 +2148,7 @@ BTRFS_SETGET_FUNCS(disk_root_level, struct btrfs_root_item, level, 8);
BTRFS_SETGET_STACK_FUNCS(root_generation, struct btrfs_root_item,
generation, 64);
BTRFS_SETGET_STACK_FUNCS(root_bytenr, struct btrfs_root_item, bytenr, 64);
+BTRFS_SETGET_STACK_FUNCS(root_drop_level, struct btrfs_root_item, drop_level, 8);
BTRFS_SETGET_STACK_FUNCS(root_level, struct btrfs_root_item, level, 8);
BTRFS_SETGET_STACK_FUNCS(root_dirid, struct btrfs_root_item, root_dirid, 64);
BTRFS_SETGET_STACK_FUNCS(root_refs, struct btrfs_root_item, refs, 32);
@@ -2517,7 +2581,17 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
enum btrfs_inline_ref_type is_data);
u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset);
-u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes);
+/*
+ * Take the number of bytes to be checksummed and figure out how many leaves
+ * it would require to store the csums for that many bytes.
+ */
+static inline u64 btrfs_csum_bytes_to_leaves(
+ const struct btrfs_fs_info *fs_info, u64 csum_bytes)
+{
+ const u64 num_csums = csum_bytes >> fs_info->sectorsize_bits;
+
+ return DIV_ROUND_UP_ULL(num_csums, fs_info->csums_per_leaf);
+}
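For illustration (the values are assumed, not taken from this patch): with sectorsize_bits == 12 (4 KiB sectors) and, say, csums_per_leaf == 2048, checksumming 1 GiB of data gives num_csums = (1 << 30) >> 12 = 262144, and DIV_ROUND_UP_ULL(262144, 2048) = 128 leaves.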
/*
* Use this if we would be adding new items, as we could split nodes as we cow
@@ -2592,7 +2666,6 @@ int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 len, int delalloc);
int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start,
u64 len);
-void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info);
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_ref *generic_ref);
@@ -2939,8 +3012,7 @@ struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
struct btrfs_dio_private;
int btrfs_del_csums(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr, u64 len);
-blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
- u64 offset, u8 *dst);
+blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst);
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 objectid, u64 pos,
@@ -2967,13 +3039,13 @@ int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start,
u64 len);
int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start,
u64 len);
-void btrfs_inode_safe_disk_i_size_write(struct inode *inode, u64 new_i_size);
+void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size);
u64 btrfs_file_extent_end(const struct btrfs_path *path);
/* inode.c */
blk_status_t btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags);
-int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u64 phy_offset,
+int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u32 bio_offset,
struct page *page, u64 start, u64 end, int mirror);
struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
u64 start, u64 len);
@@ -2993,11 +3065,11 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
const char *name, int name_len, int add_backref, u64 index);
int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry);
-int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
- int front);
+int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
+ int front);
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
- struct inode *inode, u64 new_size,
+ struct btrfs_inode *inode, u64 new_size,
u32 min_type);
int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
@@ -3037,14 +3109,13 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
struct page *page, size_t pg_offset,
u64 start, u64 end);
int btrfs_update_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct inode *inode);
+ struct btrfs_root *root, struct btrfs_inode *inode);
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode);
+ struct btrfs_root *root, struct btrfs_inode *inode);
int btrfs_orphan_add(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode);
int btrfs_orphan_cleanup(struct btrfs_root *root);
-int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
+int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size);
void btrfs_add_delayed_iput(struct inode *inode);
void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info);
int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info);
@@ -3062,7 +3133,18 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end);
void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
u64 end, int uptodate);
extern const struct dentry_operations btrfs_dentry_operations;
-ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
+extern const struct iomap_ops btrfs_dio_iomap_ops;
+extern const struct iomap_dio_ops btrfs_dio_ops;
+
+/* Inode locking type flags, by default the exclusive lock is taken */
+#define BTRFS_ILOCK_SHARED (1U << 0)
+#define BTRFS_ILOCK_TRY (1U << 1)
+
+int btrfs_inode_lock(struct inode *inode, unsigned int ilock_flags);
+void btrfs_inode_unlock(struct inode *inode, unsigned int ilock_flags);
+void btrfs_update_inode_bytes(struct btrfs_inode *inode,
+ const u64 add_bytes,
+ const u64 del_bytes);
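A minimal sketch of how a caller might combine the lock flags above; treating a non-zero return from a try-lock as contention is an assumption about the BTRFS_ILOCK_TRY semantics, not something spelled out in this hunk:

/* Hypothetical: opportunistic shared lock with a blocking fallback. */
static int lock_shared_sketch(struct inode *inode)
{
	int ret;

	ret = btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY);
	if (ret)	/* assumed: try-lock failed, take it blocking */
		ret = btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
	if (ret)
		return ret;
	/* ... read under the shared lock ... */
	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	return 0;
}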
/* ioctl.c */
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
@@ -3092,16 +3174,9 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
int skip_pinned);
extern const struct file_operations btrfs_file_operations;
-int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_inode *inode,
- struct btrfs_path *path, u64 start, u64 end,
- u64 *drop_end, int drop_cache,
- int replace_extent,
- u32 extent_item_size,
- int *key_inserted);
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode, u64 start,
- u64 end, int drop_cache);
+ struct btrfs_root *root, struct btrfs_inode *inode,
+ struct btrfs_drop_extents_args *args);
int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
const u64 start, const u64 end,
struct btrfs_replace_extent_info *extent_info,
@@ -3111,7 +3186,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
int btrfs_release_file(struct inode *inode, struct file *file);
int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
size_t num_pages, loff_t pos, size_t write_bytes,
- struct extent_state **cached);
+ struct extent_state **cached, bool noreserve);
int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
size_t *write_bytes);
@@ -3291,6 +3366,39 @@ static inline void assertfail(const char *expr, const char* file, int line) { }
#endif
/*
+ * Get the correct offset inside the page of extent buffer.
+ *
+ * @eb: target extent buffer
+ * @start: offset inside the extent buffer
+ *
+ * Will handle both sectorsize == PAGE_SIZE and sectorsize < PAGE_SIZE cases.
+ */
+static inline size_t get_eb_offset_in_page(const struct extent_buffer *eb,
+ unsigned long offset)
+{
+ /*
+ * For the sectorsize == PAGE_SIZE case, eb->start is always aligned
+ * to PAGE_SIZE, so adding it makes no difference.
+ *
+ * For sectorsize < PAGE_SIZE, we must only read the data that belongs
+ * to the eb, so we have to take eb->start into account.
+ */
+ return offset_in_page(offset + eb->start);
+}
+
+static inline unsigned long get_eb_page_index(unsigned long offset)
+{
+ /*
+ * For the sectorsize == PAGE_SIZE case, a plain >> PAGE_SHIFT is enough.
+ *
+ * For the sectorsize < PAGE_SIZE case, we only support 64K PAGE_SIZE
+ * and have ensured that all tree blocks are contained in one page,
+ * so we always get index == 0.
+ */
+ return offset >> PAGE_SHIFT;
+}
+
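The two helpers are meant to be used together; here is a minimal sketch of a hypothetical accessor, limited to single-page ranges, which is what the 64K-page constraint above guarantees for tree blocks:

/* Hypothetical read helper combining the two helpers above. */
static void eb_read_bytes_sketch(const struct extent_buffer *eb,
				 void *dst, unsigned long start,
				 unsigned long len)
{
	const unsigned long idx = get_eb_page_index(start);
	const size_t offset = get_eb_offset_in_page(eb, start);
	const char *kaddr = page_address(eb->pages[idx]);

	/* Illustration only: assumes [start, start + len) stays in one page */
	memcpy(dst, kaddr + offset, len);
}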
+/*
* Use that for functions that are conditionally exported for sanity tests but
* otherwise static
*/
@@ -3599,4 +3707,9 @@ static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
}
#endif
+static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
+{
+ return fs_info->zoned != 0;
+}
+
#endif
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 5aba81e16113..70c0340d839c 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -740,13 +740,6 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
goto out;
}
- /*
- * we need allocate some memory space, but it might cause the task
- * to sleep, so we set all locked nodes in the path to blocking locks
- * first.
- */
- btrfs_set_path_blocking(path);
-
keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
if (!keys) {
ret = -ENOMEM;
@@ -1154,7 +1147,6 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- path->leave_spinning = 1;
block_rsv = trans->block_rsv;
trans->block_rsv = &fs_info->delayed_block_rsv;
@@ -1219,7 +1211,6 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
btrfs_release_delayed_node(delayed_node);
return -ENOMEM;
}
- path->leave_spinning = 1;
block_rsv = trans->block_rsv;
trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
@@ -1264,7 +1255,6 @@ int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
ret = -ENOMEM;
goto trans_out;
}
- path->leave_spinning = 1;
block_rsv = trans->block_rsv;
trans->block_rsv = &fs_info->delayed_block_rsv;
@@ -1333,7 +1323,6 @@ static void btrfs_async_run_delayed_root(struct btrfs_work *work)
if (!delayed_node)
break;
- path->leave_spinning = 1;
root = delayed_node->root;
trans = btrfs_join_transaction(root);
@@ -1826,27 +1815,29 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
}
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode)
+ struct btrfs_root *root,
+ struct btrfs_inode *inode)
{
struct btrfs_delayed_node *delayed_node;
int ret = 0;
- delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
+ delayed_node = btrfs_get_or_create_delayed_node(inode);
if (IS_ERR(delayed_node))
return PTR_ERR(delayed_node);
mutex_lock(&delayed_node->mutex);
if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
- fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
+ fill_stack_inode_item(trans, &delayed_node->inode_item,
+ &inode->vfs_inode);
goto release_node;
}
- ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
+ ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
delayed_node);
if (ret)
goto release_node;
- fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
+ fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
delayed_node->count++;
atomic_inc(&root->fs_info->delayed_root->items);
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index ca96ef007d8f..b2412160c5bc 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -110,7 +110,8 @@ int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode);
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode);
+ struct btrfs_root *root,
+ struct btrfs_inode *inode);
int btrfs_fill_inode(struct inode *inode, u32 *rdev);
int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 10638537b9ef..a98e33f232d5 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -21,6 +21,7 @@
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
+#include "zoned.h"
/*
* Device replace overview
@@ -96,7 +97,7 @@ no_valid_dev_replace_entry_found:
* a replace target, fail the mount.
*/
if (btrfs_find_device(fs_info->fs_devices,
- BTRFS_DEV_REPLACE_DEVID, NULL, NULL, false)) {
+ BTRFS_DEV_REPLACE_DEVID, NULL, NULL)) {
btrfs_err(fs_info,
"found replace target device without a valid replace item");
ret = -EUCLEAN;
@@ -159,7 +160,7 @@ no_valid_dev_replace_entry_found:
* replace target, fail the mount.
*/
if (btrfs_find_device(fs_info->fs_devices,
- BTRFS_DEV_REPLACE_DEVID, NULL, NULL, false)) {
+ BTRFS_DEV_REPLACE_DEVID, NULL, NULL)) {
btrfs_err(fs_info,
"replace devid present without an active replace item");
ret = -EUCLEAN;
@@ -171,10 +172,10 @@ no_valid_dev_replace_entry_found:
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
dev_replace->srcdev = btrfs_find_device(fs_info->fs_devices,
- src_devid, NULL, NULL, true);
+ src_devid, NULL, NULL);
dev_replace->tgtdev = btrfs_find_device(fs_info->fs_devices,
BTRFS_DEV_REPLACE_DEVID,
- NULL, NULL, true);
+ NULL, NULL);
/*
* allow 'btrfs dev replace_cancel' if src/tgt device is
* missing
@@ -259,6 +260,13 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
return PTR_ERR(bdev);
}
+ if (!btrfs_check_device_zone_type(fs_info, bdev)) {
+ btrfs_err(fs_info,
+ "dev-replace: zoned type of target device mismatch with filesystem");
+ ret = -EINVAL;
+ goto error;
+ }
+
sync_blockdev(bdev);
list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
@@ -313,6 +321,10 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
device->fs_devices = fs_info->fs_devices;
+ ret = btrfs_get_dev_zone_info(device);
+ if (ret)
+ goto error;
+
mutex_lock(&fs_info->fs_devices->device_list_mutex);
list_add(&device->dev_list, &fs_info->fs_devices->devices);
fs_info->fs_devices->num_devices++;
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 863367c2c620..98b63ebed539 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -127,7 +127,6 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- path->leave_spinning = 1;
btrfs_cpu_key_to_disk(&disk_key, location);
diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
index 741c7e19c32f..1db966bf85b2 100644
--- a/fs/btrfs/discard.c
+++ b/fs/btrfs/discard.c
@@ -355,7 +355,7 @@ void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
block_group = find_next_block_group(discard_ctl, now);
if (block_group) {
- unsigned long delay = discard_ctl->delay;
+ u64 delay = discard_ctl->delay_ms * NSEC_PER_MSEC;
u32 kbps_limit = READ_ONCE(discard_ctl->kbps_limit);
/*
@@ -366,9 +366,9 @@ void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
if (kbps_limit && discard_ctl->prev_discard) {
u64 bps_limit = ((u64)kbps_limit) * SZ_1K;
u64 bps_delay = div64_u64(discard_ctl->prev_discard *
- MSEC_PER_SEC, bps_limit);
+ NSEC_PER_SEC, bps_limit);
- delay = max(delay, msecs_to_jiffies(bps_delay));
+ delay = max(delay, bps_delay);
}
/*
@@ -378,11 +378,20 @@ void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
if (now < block_group->discard_eligible_time) {
u64 bg_timeout = block_group->discard_eligible_time - now;
- delay = max(delay, nsecs_to_jiffies(bg_timeout));
+ delay = max(delay, bg_timeout);
+ }
+
+ if (override && discard_ctl->prev_discard) {
+ u64 elapsed = now - discard_ctl->prev_discard_time;
+
+ if (delay > elapsed)
+ delay -= elapsed;
+ else
+ delay = 0;
}
mod_delayed_work(discard_ctl->discard_workers,
- &discard_ctl->work, delay);
+ &discard_ctl->work, nsecs_to_jiffies(delay));
}
out:
spin_unlock(&discard_ctl->lock);
@@ -465,7 +474,12 @@ static void btrfs_discard_workfn(struct work_struct *work)
discard_ctl->discard_extent_bytes += trimmed;
}
+ /*
+ * Updated without locks as this is inside the workfn and nothing else
+ * is reading the values
+ */
discard_ctl->prev_discard = trimmed;
+ discard_ctl->prev_discard_time = ktime_get_ns();
/* Determine next steps for a block_group */
if (block_group->discard_cursor >= btrfs_block_group_end(block_group)) {
@@ -519,7 +533,6 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
s64 discardable_bytes;
u32 iops_limit;
unsigned long delay;
- unsigned long lower_limit = BTRFS_DISCARD_MIN_DELAY_MSEC;
discardable_extents = atomic_read(&discard_ctl->discardable_extents);
if (!discardable_extents)
@@ -550,12 +563,13 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
iops_limit = READ_ONCE(discard_ctl->iops_limit);
if (iops_limit)
- lower_limit = max_t(unsigned long, lower_limit,
- MSEC_PER_SEC / iops_limit);
+ delay = MSEC_PER_SEC / iops_limit;
+ else
+ delay = BTRFS_DISCARD_TARGET_MSEC / discardable_extents;
- delay = BTRFS_DISCARD_TARGET_MSEC / discardable_extents;
- delay = clamp(delay, lower_limit, BTRFS_DISCARD_MAX_DELAY_MSEC);
- discard_ctl->delay = msecs_to_jiffies(delay);
+ delay = clamp(delay, BTRFS_DISCARD_MIN_DELAY_MSEC,
+ BTRFS_DISCARD_MAX_DELAY_MSEC);
+ discard_ctl->delay_ms = delay;
spin_unlock(&discard_ctl->lock);
}
@@ -563,15 +577,14 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
/**
* btrfs_discard_update_discardable - propagate discard counters
* @block_group: block_group of interest
- * @ctl: free_space_ctl of @block_group
*
* This propagates deltas of counters up to the discard_ctl. It maintains a
* current counter and a previous counter passing the delta up to the global
* stat. Then the current counter value becomes the previous counter value.
*/
-void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
- struct btrfs_free_space_ctl *ctl)
+void btrfs_discard_update_discardable(struct btrfs_block_group *block_group)
{
+ struct btrfs_free_space_ctl *ctl;
struct btrfs_discard_ctl *discard_ctl;
s32 extents_delta;
s64 bytes_delta;
@@ -581,8 +594,10 @@ void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
!btrfs_is_block_group_data_only(block_group))
return;
+ ctl = block_group->free_space_ctl;
discard_ctl = &block_group->fs_info->discard_ctl;
+ lockdep_assert_held(&ctl->tree_lock);
extents_delta = ctl->discardable_extents[BTRFS_STAT_CURR] -
ctl->discardable_extents[BTRFS_STAT_PREV];
if (extents_delta) {
@@ -684,10 +699,11 @@ void btrfs_discard_init(struct btrfs_fs_info *fs_info)
INIT_LIST_HEAD(&discard_ctl->discard_list[i]);
discard_ctl->prev_discard = 0;
+ discard_ctl->prev_discard_time = 0;
atomic_set(&discard_ctl->discardable_extents, 0);
atomic64_set(&discard_ctl->discardable_bytes, 0);
discard_ctl->max_discard_size = BTRFS_ASYNC_DISCARD_DEFAULT_MAX_SIZE;
- discard_ctl->delay = BTRFS_DISCARD_MAX_DELAY_MSEC;
+ discard_ctl->delay_ms = BTRFS_DISCARD_MAX_DELAY_MSEC;
discard_ctl->iops_limit = BTRFS_DISCARD_MAX_IOPS;
discard_ctl->kbps_limit = 0;
discard_ctl->discard_extent_bytes = 0;
diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h
index 353228d62f5a..57b9202f427f 100644
--- a/fs/btrfs/discard.h
+++ b/fs/btrfs/discard.h
@@ -28,8 +28,7 @@ bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl);
/* Update operations */
void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl);
-void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
- struct btrfs_free_space_ctl *ctl);
+void btrfs_discard_update_discardable(struct btrfs_block_group *block_group);
/* Setup/cleanup operations */
void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index af97ddcc6b3e..765deefda92b 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -29,7 +29,6 @@
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
-#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
@@ -42,6 +41,7 @@
#include "block-group.h"
#include "discard.h"
#include "space-info.h"
+#include "zoned.h"
#define BTRFS_SUPER_FLAG_SUPP (BTRFS_HEADER_FLAG_WRITTEN |\
BTRFS_HEADER_FLAG_RELOC |\
@@ -109,15 +109,13 @@ static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
* just before they are sent down the IO stack.
*/
struct async_submit_bio {
- void *private_data;
+ struct inode *inode;
struct bio *bio;
extent_submit_bio_start_t *submit_bio_start;
int mirror_num;
- /*
- * bio_offset is optional, can be used if the pages in the bio
- * can't tell us where in the file the bio should go
- */
- u64 bio_offset;
+
+ /* Optional parameter for submit_bio_start used by direct io */
+ u64 dio_file_offset;
struct btrfs_work work;
blk_status_t status;
};
@@ -150,40 +148,41 @@ struct async_submit_bio {
# error
# endif
+#define DEFINE_LEVEL(stem, level) \
+ .names[level] = "btrfs-" stem "-0" #level,
+
+#define DEFINE_NAME(stem) \
+ DEFINE_LEVEL(stem, 0) \
+ DEFINE_LEVEL(stem, 1) \
+ DEFINE_LEVEL(stem, 2) \
+ DEFINE_LEVEL(stem, 3) \
+ DEFINE_LEVEL(stem, 4) \
+ DEFINE_LEVEL(stem, 5) \
+ DEFINE_LEVEL(stem, 6) \
+ DEFINE_LEVEL(stem, 7)
+
static struct btrfs_lockdep_keyset {
u64 id; /* root objectid */
- const char *name_stem; /* lock name stem */
- char names[BTRFS_MAX_LEVEL + 1][20];
- struct lock_class_key keys[BTRFS_MAX_LEVEL + 1];
+ /* Longest entry: btrfs-free-space-00 */
+ char names[BTRFS_MAX_LEVEL][20];
+ struct lock_class_key keys[BTRFS_MAX_LEVEL];
} btrfs_lockdep_keysets[] = {
- { .id = BTRFS_ROOT_TREE_OBJECTID, .name_stem = "root" },
- { .id = BTRFS_EXTENT_TREE_OBJECTID, .name_stem = "extent" },
- { .id = BTRFS_CHUNK_TREE_OBJECTID, .name_stem = "chunk" },
- { .id = BTRFS_DEV_TREE_OBJECTID, .name_stem = "dev" },
- { .id = BTRFS_FS_TREE_OBJECTID, .name_stem = "fs" },
- { .id = BTRFS_CSUM_TREE_OBJECTID, .name_stem = "csum" },
- { .id = BTRFS_QUOTA_TREE_OBJECTID, .name_stem = "quota" },
- { .id = BTRFS_TREE_LOG_OBJECTID, .name_stem = "log" },
- { .id = BTRFS_TREE_RELOC_OBJECTID, .name_stem = "treloc" },
- { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" },
- { .id = BTRFS_UUID_TREE_OBJECTID, .name_stem = "uuid" },
- { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, .name_stem = "free-space" },
- { .id = 0, .name_stem = "tree" },
+ { .id = BTRFS_ROOT_TREE_OBJECTID, DEFINE_NAME("root") },
+ { .id = BTRFS_EXTENT_TREE_OBJECTID, DEFINE_NAME("extent") },
+ { .id = BTRFS_CHUNK_TREE_OBJECTID, DEFINE_NAME("chunk") },
+ { .id = BTRFS_DEV_TREE_OBJECTID, DEFINE_NAME("dev") },
+ { .id = BTRFS_CSUM_TREE_OBJECTID, DEFINE_NAME("csum") },
+ { .id = BTRFS_QUOTA_TREE_OBJECTID, DEFINE_NAME("quota") },
+ { .id = BTRFS_TREE_LOG_OBJECTID, DEFINE_NAME("log") },
+ { .id = BTRFS_TREE_RELOC_OBJECTID, DEFINE_NAME("treloc") },
+ { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, DEFINE_NAME("dreloc") },
+ { .id = BTRFS_UUID_TREE_OBJECTID, DEFINE_NAME("uuid") },
+ { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, DEFINE_NAME("free-space") },
+ { .id = 0, DEFINE_NAME("tree") },
};
-void __init btrfs_init_lockdep(void)
-{
- int i, j;
-
- /* initialize lockdep class names */
- for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
- struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
-
- for (j = 0; j < ARRAY_SIZE(ks->names); j++)
- snprintf(ks->names[j], sizeof(ks->names[j]),
- "btrfs-%s-%02d", ks->name_stem, j);
- }
-}
+#undef DEFINE_LEVEL
+#undef DEFINE_NAME
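For reference, after token pasting DEFINE_NAME("root") in the table above expands to the same strings the removed snprintf loop used to build at runtime:

.names[0] = "btrfs-root-00",
.names[1] = "btrfs-root-01",
/* ... */
.names[7] = "btrfs-root-07",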
void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
int level)
@@ -210,15 +209,16 @@ static void csum_tree_block(struct extent_buffer *buf, u8 *result)
{
struct btrfs_fs_info *fs_info = buf->fs_info;
const int num_pages = fs_info->nodesize >> PAGE_SHIFT;
+ const int first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
char *kaddr;
int i;
shash->tfm = fs_info->csum_shash;
crypto_shash_init(shash);
- kaddr = page_address(buf->pages[0]);
+ kaddr = page_address(buf->pages[0]) + offset_in_page(buf->start);
crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
- PAGE_SIZE - BTRFS_CSUM_SIZE);
+ first_page_part - BTRFS_CSUM_SIZE);
for (i = 1; i < num_pages; i++) {
kaddr = page_address(buf->pages[i]);
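Two illustrative cases for the first_page_part computation above: with 4K pages and a 16K nodesize, first_page_part == 4096 and num_pages == 4, so the loop hashes pages 1 through 3 after the first page; with 64K pages and a 16K nodesize, first_page_part == 16384 and num_pages == 0, so only the block's own 16K within the first page (minus the checksum area) is hashed, starting at offset_in_page(buf->start).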
@@ -248,10 +248,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
if (atomic)
return -EAGAIN;
- if (need_lock) {
+ if (need_lock)
btrfs_tree_read_lock(eb);
- btrfs_set_lock_blocking_read(eb);
- }
lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
&cached_state);
@@ -280,7 +278,7 @@ out:
unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
&cached_state);
if (need_lock)
- btrfs_tree_read_unlock_blocking(eb);
+ btrfs_tree_read_unlock(eb);
return ret;
}
@@ -319,7 +317,7 @@ static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
crypto_shash_digest(shash, raw_disk_sb + BTRFS_CSUM_SIZE,
BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);
- if (memcmp(disk_sb->csum, result, btrfs_super_csum_size(disk_sb)))
+ if (memcmp(disk_sb->csum, result, fs_info->csum_size))
return 1;
return 0;
@@ -443,16 +441,16 @@ static int btree_read_extent_buffer_pages(struct extent_buffer *eb,
}
/*
- * checksum a dirty tree block before IO. This has extra checks to make sure
- * we only fill in the checksum field in the first page of a multi-page block
+ * Checksum a dirty tree block before IO. This has extra checks to make sure
+ * we only fill in the checksum field in the first page of a multi-page block.
+ * For subpage extent buffers the bvec is also needed to get the offset
+ * within the page.
*/
-
-static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
+static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct bio_vec *bvec)
{
+ struct page *page = bvec->bv_page;
u64 start = page_offset(page);
u64 found_start;
u8 result[BTRFS_CSUM_SIZE];
- u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
struct extent_buffer *eb;
int ret;
@@ -489,7 +487,7 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
return ret;
}
- write_extent_buffer(eb, result, 0, csum_size);
+ write_extent_buffer(eb, result, 0, fs_info->csum_size);
return 0;
}
@@ -523,65 +521,37 @@ static int check_tree_block_fsid(struct extent_buffer *eb)
return 1;
}
-int btrfs_validate_metadata_buffer(struct btrfs_io_bio *io_bio, u64 phy_offset,
- struct page *page, u64 start, u64 end,
- int mirror)
+/* Do basic extent buffer checks at read time */
+static int validate_extent_buffer(struct extent_buffer *eb)
{
+ struct btrfs_fs_info *fs_info = eb->fs_info;
u64 found_start;
- int found_level;
- struct extent_buffer *eb;
- struct btrfs_fs_info *fs_info;
- u16 csum_size;
- int ret = 0;
+ const u32 csum_size = fs_info->csum_size;
+ u8 found_level;
u8 result[BTRFS_CSUM_SIZE];
- int reads_done;
-
- if (!page->private)
- goto out;
-
- eb = (struct extent_buffer *)page->private;
- fs_info = eb->fs_info;
- csum_size = btrfs_super_csum_size(fs_info->super_copy);
-
- /* the pending IO might have been the only thing that kept this buffer
- * in memory. Make sure we have a ref for all this other checks
- */
- atomic_inc(&eb->refs);
-
- reads_done = atomic_dec_and_test(&eb->io_pages);
- if (!reads_done)
- goto err;
-
- eb->read_mirror = mirror;
- if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
- ret = -EIO;
- goto err;
- }
+ int ret = 0;
found_start = btrfs_header_bytenr(eb);
if (found_start != eb->start) {
btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
eb->start, found_start);
ret = -EIO;
- goto err;
+ goto out;
}
if (check_tree_block_fsid(eb)) {
btrfs_err_rl(fs_info, "bad fsid on block %llu",
eb->start);
ret = -EIO;
- goto err;
+ goto out;
}
found_level = btrfs_header_level(eb);
if (found_level >= BTRFS_MAX_LEVEL) {
btrfs_err(fs_info, "bad tree block level %d on %llu",
(int)btrfs_header_level(eb), eb->start);
ret = -EIO;
- goto err;
+ goto out;
}
- btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
- eb, found_level);
-
csum_tree_block(eb, result);
if (memcmp_extent_buffer(eb, result, 0, csum_size)) {
@@ -595,7 +565,7 @@ int btrfs_validate_metadata_buffer(struct btrfs_io_bio *io_bio, u64 phy_offset,
CSUM_FMT_VALUE(csum_size, result),
btrfs_header_level(eb));
ret = -EUCLEAN;
- goto err;
+ goto out;
}
/*
@@ -617,6 +587,37 @@ int btrfs_validate_metadata_buffer(struct btrfs_io_bio *io_bio, u64 phy_offset,
btrfs_err(fs_info,
"block=%llu read time tree block corruption detected",
eb->start);
+out:
+ return ret;
+}
+
+int btrfs_validate_metadata_buffer(struct btrfs_io_bio *io_bio,
+ struct page *page, u64 start, u64 end,
+ int mirror)
+{
+ struct extent_buffer *eb;
+ int ret = 0;
+ int reads_done;
+
+ ASSERT(page->private);
+ eb = (struct extent_buffer *)page->private;
+
+ /*
+ * The pending IO might have been the only thing that kept this buffer
+ * in memory. Make sure we have a ref for all the other checks.
+ */
+ atomic_inc(&eb->refs);
+
+ reads_done = atomic_dec_and_test(&eb->io_pages);
+ if (!reads_done)
+ goto err;
+
+ eb->read_mirror = mirror;
+ if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
+ ret = -EIO;
+ goto err;
+ }
+ ret = validate_extent_buffer(eb);
err:
if (reads_done &&
test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
@@ -632,7 +633,7 @@ err:
clear_extent_buffer_uptodate(eb);
}
free_extent_buffer(eb);
-out:
+
return ret;
}
@@ -694,8 +695,8 @@ static void run_one_async_start(struct btrfs_work *work)
blk_status_t ret;
async = container_of(work, struct async_submit_bio, work);
- ret = async->submit_bio_start(async->private_data, async->bio,
- async->bio_offset);
+ ret = async->submit_bio_start(async->inode, async->bio,
+ async->dio_file_offset);
if (ret)
async->status = ret;
}
@@ -715,7 +716,7 @@ static void run_one_async_done(struct btrfs_work *work)
blk_status_t ret;
async = container_of(work, struct async_submit_bio, work);
- inode = async->private_data;
+ inode = async->inode;
/* If an error occurred we just want to clean up the bio and move on */
if (async->status) {
@@ -745,18 +746,19 @@ static void run_one_async_free(struct btrfs_work *work)
kfree(async);
}
-blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
+blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags,
- u64 bio_offset, void *private_data,
+ u64 dio_file_offset,
extent_submit_bio_start_t *submit_bio_start)
{
+ struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
struct async_submit_bio *async;
async = kmalloc(sizeof(*async), GFP_NOFS);
if (!async)
return BLK_STS_RESOURCE;
- async->private_data = private_data;
+ async->inode = inode;
async->bio = bio;
async->mirror_num = mirror_num;
async->submit_bio_start = submit_bio_start;
@@ -764,7 +766,7 @@ blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
run_one_async_free);
- async->bio_offset = bio_offset;
+ async->dio_file_offset = dio_file_offset;
async->status = 0;
@@ -785,7 +787,7 @@ static blk_status_t btree_csum_one_bio(struct bio *bio)
ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_segment_all(bvec, bio, iter_all) {
root = BTRFS_I(bvec->bv_page->mapping->host)->root;
- ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
+ ret = csum_dirty_buffer(root->fs_info, bvec);
if (ret)
break;
}
@@ -793,8 +795,8 @@ static blk_status_t btree_csum_one_bio(struct bio *bio)
return errno_to_blk_status(ret);
}
-static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
- u64 bio_offset)
+static blk_status_t btree_submit_bio_start(struct inode *inode, struct bio *bio,
+ u64 dio_file_offset)
{
/*
* when we're called for a write, we're already in the async
@@ -840,8 +842,8 @@ blk_status_t btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio,
* kthread helpers are used to submit writes so that
* checksumming can happen in parallel across all CPUs
*/
- ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
- 0, inode, btree_submit_bio_start);
+ ret = btrfs_wq_submit_bio(inode, bio, mirror_num, 0,
+ 0, btree_submit_bio_start);
}
if (ret)
@@ -947,47 +949,33 @@ static const struct address_space_operations btree_aops = {
.set_page_dirty = btree_set_page_dirty,
};
-void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
-{
- struct extent_buffer *buf = NULL;
- int ret;
-
- buf = btrfs_find_create_tree_block(fs_info, bytenr);
- if (IS_ERR(buf))
- return;
-
- ret = read_extent_buffer_pages(buf, WAIT_NONE, 0);
- if (ret < 0)
- free_extent_buffer_stale(buf);
- else
- free_extent_buffer(buf);
-}
-
struct extent_buffer *btrfs_find_create_tree_block(
struct btrfs_fs_info *fs_info,
- u64 bytenr)
+ u64 bytenr, u64 owner_root,
+ int level)
{
if (btrfs_is_testing(fs_info))
return alloc_test_extent_buffer(fs_info, bytenr);
- return alloc_extent_buffer(fs_info, bytenr);
+ return alloc_extent_buffer(fs_info, bytenr, owner_root, level);
}
/*
* Read tree block at logical address @bytenr and do variant basic but critical
* verification.
*
+ * @owner_root: the objectid of the root owner for this block.
* @parent_transid: expected transid of this tree block, skip check if 0
* @level: expected level, mandatory check
* @first_key: expected key in slot 0, skip check if NULL
*/
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 parent_transid, int level,
- struct btrfs_key *first_key)
+ u64 owner_root, u64 parent_transid,
+ int level, struct btrfs_key *first_key)
{
struct extent_buffer *buf = NULL;
int ret;
- buf = btrfs_find_create_tree_block(fs_info, bytenr);
+ buf = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
if (IS_ERR(buf))
return buf;
@@ -1012,8 +1000,6 @@ void btrfs_clean_tree_block(struct extent_buffer *buf)
percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
-buf->len,
fs_info->dirty_metadata_batch);
- /* ugh, clear_extent_buffer_dirty needs to lock the page */
- btrfs_set_lock_blocking_write(buf);
clear_extent_buffer_dirty(buf);
}
}
@@ -1155,7 +1141,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
if (IS_ERR(leaf)) {
ret = PTR_ERR(leaf);
leaf = NULL;
- goto fail;
+ goto fail_unlock;
}
root->node = leaf;
@@ -1164,8 +1150,8 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
root->commit_root = btrfs_root_node(root);
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
- root->root_item.flags = 0;
- root->root_item.byte_limit = 0;
+ btrfs_set_root_flags(&root->root_item, 0);
+ btrfs_set_root_limit(&root->root_item, 0);
btrfs_set_root_bytenr(&root->root_item, leaf->start);
btrfs_set_root_generation(&root->root_item, trans->transid);
btrfs_set_root_level(&root->root_item, 0);
@@ -1177,7 +1163,9 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
generate_random_guid(root->root_item.uuid);
else
export_guid(root->root_item.uuid, &guid_null);
- root->root_item.drop_level = 0;
+ btrfs_set_root_drop_level(&root->root_item, 0);
+
+ btrfs_tree_unlock(leaf);
key.objectid = objectid;
key.type = BTRFS_ROOT_ITEM_KEY;
@@ -1186,13 +1174,12 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
if (ret)
goto fail;
- btrfs_tree_unlock(leaf);
-
return root;
-fail:
+fail_unlock:
if (leaf)
btrfs_tree_unlock(leaf);
+fail:
btrfs_put_root(root);
return ERR_PTR(ret);
@@ -1307,7 +1294,7 @@ static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
level = btrfs_root_level(&root->root_item);
root->node = read_tree_block(fs_info,
btrfs_root_bytenr(&root->root_item),
- generation, level, NULL);
+ key->objectid, generation, level, NULL);
if (IS_ERR(root->node)) {
ret = PTR_ERR(root->node);
root->node = NULL;
@@ -1348,14 +1335,6 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
int ret;
unsigned int nofs_flag;
- root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
- root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
- GFP_NOFS);
- if (!root->free_ino_pinned || !root->free_ino_ctl) {
- ret = -ENOMEM;
- goto fail;
- }
-
/*
* We might be called under a transaction (e.g. indirect backref
* resolution) which could deadlock if it triggers memory reclaim
@@ -1372,10 +1351,6 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
btrfs_check_and_init_root_item(&root->root_item);
}
- btrfs_init_free_ino_ctl(root);
- spin_lock_init(&root->ino_cache_lock);
- init_waitqueue_head(&root->ino_cache_wait);
-
/*
* Don't assign anonymous block device to roots that are not exposed to
* userspace, the id pool is limited to 1M
@@ -1774,13 +1749,13 @@ static int transaction_kthread(void *arg)
struct btrfs_trans_handle *trans;
struct btrfs_transaction *cur;
u64 transid;
- time64_t now;
+ time64_t delta;
unsigned long delay;
bool cannot_commit;
do {
cannot_commit = false;
- delay = HZ * fs_info->commit_interval;
+ delay = msecs_to_jiffies(fs_info->commit_interval * 1000);
mutex_lock(&fs_info->transaction_kthread_mutex);
spin_lock(&fs_info->trans_lock);
@@ -1790,12 +1765,13 @@ static int transaction_kthread(void *arg)
goto sleep;
}
- now = ktime_get_seconds();
+ delta = ktime_get_seconds() - cur->start_time;
if (cur->state < TRANS_STATE_COMMIT_START &&
- (now < cur->start_time ||
- now - cur->start_time < fs_info->commit_interval)) {
+ delta < fs_info->commit_interval) {
spin_unlock(&fs_info->trans_lock);
- delay = HZ * 5;
+ delay -= msecs_to_jiffies((delta - 1) * 1000);
+ delay = min(delay,
+ msecs_to_jiffies(fs_info->commit_interval * 1000));
goto sleep;
}
transid = cur->transid;
@@ -2044,8 +2020,6 @@ void btrfs_put_root(struct btrfs_root *root)
free_anon_bdev(root->anon_dev);
btrfs_drew_lock_destroy(&root->snapshot_lock);
free_root_extent_buffers(root);
- kfree(root->free_ino_ctl);
- kfree(root->free_ino_pinned);
#ifdef CONFIG_BTRFS_DEBUG
spin_lock(&root->fs_info->fs_roots_radix_lock);
list_del_init(&root->leak_list);
@@ -2260,8 +2234,9 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
return -ENOMEM;
log_tree_root->node = read_tree_block(fs_info, bytenr,
- fs_info->generation + 1,
- level, NULL);
+ BTRFS_TREE_LOG_OBJECTID,
+ fs_info->generation + 1, level,
+ NULL);
if (IS_ERR(log_tree_root->node)) {
btrfs_warn(fs_info, "failed to read log tree");
ret = PTR_ERR(log_tree_root->node);
@@ -2306,30 +2281,42 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
root = btrfs_read_tree_root(tree_root, &location);
if (IS_ERR(root)) {
- ret = PTR_ERR(root);
- goto out;
+ if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
+ ret = PTR_ERR(root);
+ goto out;
+ }
+ } else {
+ set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+ fs_info->extent_root = root;
}
- set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
- fs_info->extent_root = root;
location.objectid = BTRFS_DEV_TREE_OBJECTID;
root = btrfs_read_tree_root(tree_root, &location);
if (IS_ERR(root)) {
- ret = PTR_ERR(root);
- goto out;
+ if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
+ ret = PTR_ERR(root);
+ goto out;
+ }
+ } else {
+ set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+ fs_info->dev_root = root;
+ btrfs_init_devices_late(fs_info);
}
- set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
- fs_info->dev_root = root;
- btrfs_init_devices_late(fs_info);
- location.objectid = BTRFS_CSUM_TREE_OBJECTID;
- root = btrfs_read_tree_root(tree_root, &location);
- if (IS_ERR(root)) {
- ret = PTR_ERR(root);
- goto out;
+ /* If IGNOREDATACSUMS is set, don't bother reading the csum root. */
+ if (!btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
+ location.objectid = BTRFS_CSUM_TREE_OBJECTID;
+ root = btrfs_read_tree_root(tree_root, &location);
+ if (IS_ERR(root)) {
+ if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
+ ret = PTR_ERR(root);
+ goto out;
+ }
+ } else {
+ set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+ fs_info->csum_root = root;
+ }
}
- set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
- fs_info->csum_root = root;
/*
* This tree can share blocks with some other fs tree during relocation
@@ -2338,11 +2325,14 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
root = btrfs_get_fs_root(tree_root->fs_info,
BTRFS_DATA_RELOC_TREE_OBJECTID, true);
if (IS_ERR(root)) {
- ret = PTR_ERR(root);
- goto out;
+ if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
+ ret = PTR_ERR(root);
+ goto out;
+ }
+ } else {
+ set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+ fs_info->data_reloc_root = root;
}
- set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
- fs_info->data_reloc_root = root;
location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
root = btrfs_read_tree_root(tree_root, &location);
@@ -2355,9 +2345,11 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
location.objectid = BTRFS_UUID_TREE_OBJECTID;
root = btrfs_read_tree_root(tree_root, &location);
if (IS_ERR(root)) {
- ret = PTR_ERR(root);
- if (ret != -ENOENT)
- goto out;
+ if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
+ ret = PTR_ERR(root);
+ if (ret != -ENOENT)
+ goto out;
+ }
} else {
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
fs_info->uuid_root = root;
@@ -2367,11 +2359,14 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
root = btrfs_read_tree_root(tree_root, &location);
if (IS_ERR(root)) {
- ret = PTR_ERR(root);
- goto out;
+ if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
+ ret = PTR_ERR(root);
+ goto out;
+ }
+ } else {
+ set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+ fs_info->free_space_root = root;
}
- set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
- fs_info->free_space_root = root;
}
return 0;
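Each tree handled in this hunk follows the same shape; a condensed restatement, with 'some_root' as a placeholder rather than a real field:

root = btrfs_read_tree_root(tree_root, &location);
if (IS_ERR(root)) {
	/* Fatal unless the IGNOREBADROOTS mount option is set */
	if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
		ret = PTR_ERR(root);
		goto out;
	}
	/* otherwise leave the root pointer NULL and keep going */
} else {
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
	fs_info->some_root = root;
}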
@@ -2627,6 +2622,7 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
generation = btrfs_super_generation(sb);
level = btrfs_super_root_level(sb);
tree_root->node = read_tree_block(fs_info, btrfs_super_root(sb),
+ BTRFS_ROOT_TREE_OBJECTID,
generation, level, NULL);
if (IS_ERR(tree_root->node)) {
handle_error = true;
@@ -2791,6 +2787,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
/* Usable values until the real ones are cached from the superblock */
fs_info->nodesize = 4096;
fs_info->sectorsize = 4096;
+ fs_info->sectorsize_bits = ilog2(4096);
fs_info->stripesize = 4096;
spin_lock_init(&fs_info->swapfile_pins_lock);
@@ -2873,6 +2870,109 @@ static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
return 0;
}
+/*
+ * Some options only have meaning at mount time and shouldn't persist across
+ * remounts, or be displayed. Clear these at the end of mount and remount
+ * code paths.
+ */
+void btrfs_clear_oneshot_options(struct btrfs_fs_info *fs_info)
+{
+ btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
+ btrfs_clear_opt(fs_info->mount_opt, CLEAR_CACHE);
+}
+
+/*
+ * Mounting logic specific to read-write file systems. Shared by open_ctree
+ * and btrfs_remount when remounting from read-only to read-write.
+ */
+int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
+{
+ int ret;
+ const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
+ bool clear_free_space_tree = false;
+
+ if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
+ btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
+ clear_free_space_tree = true;
+ } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
+ !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
+ btrfs_warn(fs_info, "free space tree is invalid");
+ clear_free_space_tree = true;
+ }
+
+ if (clear_free_space_tree) {
+ btrfs_info(fs_info, "clearing free space tree");
+ ret = btrfs_clear_free_space_tree(fs_info);
+ if (ret) {
+ btrfs_warn(fs_info,
+ "failed to clear free space tree: %d", ret);
+ goto out;
+ }
+ }
+
+ ret = btrfs_cleanup_fs_roots(fs_info);
+ if (ret)
+ goto out;
+
+ down_read(&fs_info->cleanup_work_sem);
+ if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
+ (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
+ up_read(&fs_info->cleanup_work_sem);
+ goto out;
+ }
+ up_read(&fs_info->cleanup_work_sem);
+
+ mutex_lock(&fs_info->cleaner_mutex);
+ ret = btrfs_recover_relocation(fs_info->tree_root);
+ mutex_unlock(&fs_info->cleaner_mutex);
+ if (ret < 0) {
+ btrfs_warn(fs_info, "failed to recover relocation: %d", ret);
+ goto out;
+ }
+
+ if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
+ !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
+ btrfs_info(fs_info, "creating free space tree");
+ ret = btrfs_create_free_space_tree(fs_info);
+ if (ret) {
+ btrfs_warn(fs_info,
+ "failed to create free space tree: %d", ret);
+ goto out;
+ }
+ }
+
+ if (cache_opt != btrfs_free_space_cache_v1_active(fs_info)) {
+ ret = btrfs_set_free_space_cache_v1_active(fs_info, cache_opt);
+ if (ret)
+ goto out;
+ }
+
+ ret = btrfs_resume_balance_async(fs_info);
+ if (ret)
+ goto out;
+
+ ret = btrfs_resume_dev_replace_async(fs_info);
+ if (ret) {
+ btrfs_warn(fs_info, "failed to resume dev_replace");
+ goto out;
+ }
+
+ btrfs_qgroup_rescan_resume(fs_info);
+
+ if (!fs_info->uuid_root) {
+ btrfs_info(fs_info, "creating UUID tree");
+ ret = btrfs_create_uuid_tree(fs_info);
+ if (ret) {
+ btrfs_warn(fs_info,
+ "failed to create the UUID tree %d", ret);
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
char *options)
{
@@ -2888,7 +2988,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
struct btrfs_root *chunk_root;
int ret;
int err = -EINVAL;
- int clear_free_space_tree = 0;
int level;
ret = init_mount_fs_info(fs_info, sb);
@@ -3035,6 +3134,8 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
btrfs_info(fs_info, "has skinny extents");
+ fs_info->zoned = (features & BTRFS_FEATURE_INCOMPAT_ZONED);
+
/*
* flag our filesystem as having big metadata blocks if
* they are bigger than the page size
@@ -3055,6 +3156,9 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
/* Cache block sizes */
fs_info->nodesize = nodesize;
fs_info->sectorsize = sectorsize;
+ fs_info->sectorsize_bits = ilog2(sectorsize);
+ fs_info->csum_size = btrfs_super_csum_size(disk_super);
+ fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
fs_info->stripesize = stripesize;
/*
@@ -3111,6 +3215,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
chunk_root->node = read_tree_block(fs_info,
btrfs_super_chunk_root(disk_super),
+ BTRFS_CHUNK_TREE_OBJECTID,
generation, level, NULL);
if (IS_ERR(chunk_root->node) ||
!extent_buffer_uptodate(chunk_root->node)) {
@@ -3134,11 +3239,13 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
}
/*
- * Keep the devid that is marked to be the target device for the
- * device replace procedure
+ * At this point we know all the devices that make up this filesystem,
+ * including the seed devices, but we don't know yet if the replace
+ * target is required. So free devices that are not part of this
+ * filesystem but skip the replace target device, which is checked
+ * below in btrfs_init_dev_replace().
*/
- btrfs_free_extra_devids(fs_devices, 0);
-
+ btrfs_free_extra_devids(fs_devices);
if (!fs_devices->latest_bdev) {
btrfs_err(fs_info, "failed to read devices");
goto fail_tree_roots;
@@ -3185,7 +3292,12 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
goto fail_block_groups;
}
- btrfs_free_extra_devids(fs_devices, 1);
+ ret = btrfs_check_zoned_mode(fs_info);
+ if (ret) {
+ btrfs_err(fs_info, "failed to initialize zoned mode: %d",
+ ret);
+ goto fail_block_groups;
+ }
ret = btrfs_sysfs_add_fsid(fs_devices);
if (ret) {
@@ -3275,22 +3387,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
if (ret)
goto fail_qgroup;
- if (!sb_rdonly(sb)) {
- ret = btrfs_cleanup_fs_roots(fs_info);
- if (ret)
- goto fail_qgroup;
-
- mutex_lock(&fs_info->cleaner_mutex);
- ret = btrfs_recover_relocation(tree_root);
- mutex_unlock(&fs_info->cleaner_mutex);
- if (ret < 0) {
- btrfs_warn(fs_info, "failed to recover relocation: %d",
- ret);
- err = -EINVAL;
- goto fail_qgroup;
- }
- }
-
fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
if (IS_ERR(fs_info->fs_root)) {
err = PTR_ERR(fs_info->fs_root);
@@ -3300,78 +3396,18 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
}
if (sb_rdonly(sb))
- return 0;
-
- if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
- btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
- clear_free_space_tree = 1;
- } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
- !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
- btrfs_warn(fs_info, "free space tree is invalid");
- clear_free_space_tree = 1;
- }
-
- if (clear_free_space_tree) {
- btrfs_info(fs_info, "clearing free space tree");
- ret = btrfs_clear_free_space_tree(fs_info);
- if (ret) {
- btrfs_warn(fs_info,
- "failed to clear free space tree: %d", ret);
- close_ctree(fs_info);
- return ret;
- }
- }
-
- if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
- !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
- btrfs_info(fs_info, "creating free space tree");
- ret = btrfs_create_free_space_tree(fs_info);
- if (ret) {
- btrfs_warn(fs_info,
- "failed to create free space tree: %d", ret);
- close_ctree(fs_info);
- return ret;
- }
- }
-
- down_read(&fs_info->cleanup_work_sem);
- if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
- (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
- up_read(&fs_info->cleanup_work_sem);
- close_ctree(fs_info);
- return ret;
- }
- up_read(&fs_info->cleanup_work_sem);
+ goto clear_oneshot;
- ret = btrfs_resume_balance_async(fs_info);
- if (ret) {
- btrfs_warn(fs_info, "failed to resume balance: %d", ret);
- close_ctree(fs_info);
- return ret;
- }
-
- ret = btrfs_resume_dev_replace_async(fs_info);
+ ret = btrfs_start_pre_rw_mount(fs_info);
if (ret) {
- btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
close_ctree(fs_info);
return ret;
}
-
- btrfs_qgroup_rescan_resume(fs_info);
btrfs_discard_resume(fs_info);
- if (!fs_info->uuid_root) {
- btrfs_info(fs_info, "creating UUID tree");
- ret = btrfs_create_uuid_tree(fs_info);
- if (ret) {
- btrfs_warn(fs_info,
- "failed to create the UUID tree: %d", ret);
- close_ctree(fs_info);
- return ret;
- }
- } else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
- fs_info->generation !=
- btrfs_super_uuid_tree_generation(disk_super)) {
+ if (fs_info->uuid_root &&
+ (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
+ fs_info->generation != btrfs_super_uuid_tree_generation(disk_super))) {
btrfs_info(fs_info, "checking UUID tree");
ret = btrfs_check_uuid_tree(fs_info);
if (ret) {
@@ -3381,14 +3417,11 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
return ret;
}
}
- set_bit(BTRFS_FS_OPEN, &fs_info->flags);
- /*
- * backuproot only affect mount behavior, and if open_ctree succeeded,
- * no need to keep the flag
- */
- btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
+ set_bit(BTRFS_FS_OPEN, &fs_info->flags);
+clear_oneshot:
+ btrfs_clear_oneshot_options(fs_info);
return 0;
fail_qgroup:
@@ -3469,10 +3502,17 @@ struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
{
struct btrfs_super_block *super;
struct page *page;
- u64 bytenr;
+ u64 bytenr, bytenr_orig;
struct address_space *mapping = bdev->bd_inode->i_mapping;
+ int ret;
+
+ bytenr_orig = btrfs_sb_offset(copy_num);
+ ret = btrfs_sb_log_location_bdev(bdev, copy_num, READ, &bytenr);
+ if (ret == -ENOENT)
+ return ERR_PTR(-EINVAL);
+ else if (ret)
+ return ERR_PTR(ret);
- bytenr = btrfs_sb_offset(copy_num);
if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
return ERR_PTR(-EINVAL);
@@ -3486,7 +3526,7 @@ struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
return ERR_PTR(-ENODATA);
}
- if (btrfs_super_bytenr(super) != bytenr) {
+ if (btrfs_super_bytenr(super) != bytenr_orig) {
btrfs_release_disk_super(super);
return ERR_PTR(-EINVAL);
}
@@ -3541,7 +3581,8 @@ static int write_dev_supers(struct btrfs_device *device,
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
int i;
int errors = 0;
- u64 bytenr;
+ int ret;
+ u64 bytenr, bytenr_orig;
if (max_mirrors == 0)
max_mirrors = BTRFS_SUPER_MIRROR_MAX;
@@ -3553,12 +3594,22 @@ static int write_dev_supers(struct btrfs_device *device,
struct bio *bio;
struct btrfs_super_block *disk_super;
- bytenr = btrfs_sb_offset(i);
+ bytenr_orig = btrfs_sb_offset(i);
+ ret = btrfs_sb_log_location(device, i, WRITE, &bytenr);
+ if (ret == -ENOENT) {
+ continue;
+ } else if (ret < 0) {
+ btrfs_err(device->fs_info,
+ "couldn't get super block location for mirror %d",
+ i);
+ errors++;
+ continue;
+ }
if (bytenr + BTRFS_SUPER_INFO_SIZE >=
device->commit_total_bytes)
break;
- btrfs_set_super_bytenr(sb, bytenr);
+ btrfs_set_super_bytenr(sb, bytenr_orig);
crypto_shash_digest(shash, (const char *)sb + BTRFS_CSUM_SIZE,
BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE,
@@ -3603,6 +3654,7 @@ static int write_dev_supers(struct btrfs_device *device,
bio->bi_opf |= REQ_FUA;
btrfsic_submit_bio(bio);
+ btrfs_advance_sb_log(device, i);
}
return errors < i ? 0 : -1;
}
@@ -3619,6 +3671,7 @@ static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
int i;
int errors = 0;
bool primary_failed = false;
+ int ret;
u64 bytenr;
if (max_mirrors == 0)
@@ -3627,7 +3680,15 @@ static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
for (i = 0; i < max_mirrors; i++) {
struct page *page;
- bytenr = btrfs_sb_offset(i);
+ ret = btrfs_sb_log_location(device, i, READ, &bytenr);
+ if (ret == -ENOENT) {
+ break;
+ } else if (ret < 0) {
+ errors++;
+ if (i == 0)
+ primary_failed = true;
+ continue;
+ }
if (bytenr + BTRFS_SUPER_INFO_SIZE >=
device->commit_total_bytes)
break;
@@ -3941,14 +4002,6 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
}
}
- if (root->free_ino_pinned)
- __btrfs_remove_free_space_cache(root->free_ino_pinned);
- if (root->free_ino_ctl)
- __btrfs_remove_free_space_cache(root->free_ino_ctl);
- if (root->ino_cache_inode) {
- iput(root->ino_cache_inode);
- root->ino_cache_inode = NULL;
- }
if (drop_ref)
btrfs_put_root(root);
}
@@ -4171,8 +4224,7 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
- struct btrfs_fs_info *fs_info;
- struct btrfs_root *root;
+ struct btrfs_fs_info *fs_info = buf->fs_info;
u64 transid = btrfs_header_generation(buf);
int was_dirty;
@@ -4185,8 +4237,6 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
return;
#endif
- root = BTRFS_I(buf->pages[0]->mapping->host)->root;
- fs_info = root->fs_info;
btrfs_assert_tree_locked(buf);
if (transid != fs_info->generation)
WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
@@ -4691,3 +4741,58 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
return 0;
}
+
+int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
+{
+ struct btrfs_path *path;
+ int ret;
+ struct extent_buffer *l;
+ struct btrfs_key search_key;
+ struct btrfs_key found_key;
+ int slot;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
+ search_key.type = -1;
+ search_key.offset = (u64)-1;
+ ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
+ if (ret < 0)
+ goto error;
+ BUG_ON(ret == 0); /* Corruption */
+ if (path->slots[0] > 0) {
+ slot = path->slots[0] - 1;
+ l = path->nodes[0];
+ btrfs_item_key_to_cpu(l, &found_key, slot);
+ *objectid = max_t(u64, found_key.objectid,
+ BTRFS_FIRST_FREE_OBJECTID - 1);
+ } else {
+ *objectid = BTRFS_FIRST_FREE_OBJECTID - 1;
+ }
+ ret = 0;
+error:
+ btrfs_free_path(path);
+ return ret;
+}
+
+int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
+{
+ int ret;
+ mutex_lock(&root->objectid_mutex);
+
+ if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
+ btrfs_warn(root->fs_info,
+ "the objectid of root %llu reaches its highest value",
+ root->root_key.objectid);
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ *objectid = ++root->highest_objectid;
+ ret = 0;
+out:
+ mutex_unlock(&root->objectid_mutex);
+ return ret;
+}
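
A minimal userspace sketch of the allocation scheme implemented by the two helpers above: scan once for the highest objectid in use (btrfs_find_highest_objectid(), done when the root is loaded), then hand out strictly increasing ids under a mutex. All model_* names are illustrative, not kernel API:

#include <errno.h>
#include <pthread.h>
#include <stdint.h>

/* Mirrors BTRFS_LAST_FREE_OBJECTID; ids at or above this are reserved. */
#define MODEL_LAST_FREE_OBJECTID ((uint64_t)-256)

struct model_root {
	pthread_mutex_t objectid_mutex;
	/* Seeded from the highest key found, as in the tree search above */
	uint64_t highest_objectid;
};

static int model_find_free_objectid(struct model_root *root, uint64_t *objectid)
{
	int ret = 0;

	pthread_mutex_lock(&root->objectid_mutex);
	if (root->highest_objectid >= MODEL_LAST_FREE_OBJECTID)
		ret = -ENOSPC;	/* objectid space exhausted */
	else
		*objectid = ++root->highest_objectid;
	pthread_mutex_unlock(&root->objectid_mutex);
	return ret;
}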
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 182540bdcea0..e45057c0c016 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -43,13 +43,15 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info);
int btrfs_verify_level_key(struct extent_buffer *eb, int level,
struct btrfs_key *first_key, u64 parent_transid);
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 parent_transid, int level,
- struct btrfs_key *first_key);
-void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr);
+ u64 owner_root, u64 parent_transid,
+ int level, struct btrfs_key *first_key);
struct extent_buffer *btrfs_find_create_tree_block(
struct btrfs_fs_info *fs_info,
- u64 bytenr);
+ u64 bytenr, u64 owner_root,
+ int level);
void btrfs_clean_tree_block(struct extent_buffer *buf);
+void btrfs_clear_oneshot_options(struct btrfs_fs_info *fs_info);
+int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info);
int __cold open_ctree(struct super_block *sb,
struct btrfs_fs_devices *fs_devices,
char *options);
@@ -79,7 +81,7 @@ void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info);
void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info);
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
struct btrfs_root *root);
-int btrfs_validate_metadata_buffer(struct btrfs_io_bio *io_bio, u64 phy_offset,
+int btrfs_validate_metadata_buffer(struct btrfs_io_bio *io_bio,
struct page *page, u64 start, u64 end,
int mirror);
blk_status_t btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio,
@@ -112,10 +114,10 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level,
struct btrfs_key *first_key);
blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
enum btrfs_wq_endio_type metadata);
-blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
- int mirror_num, unsigned long bio_flags,
- u64 bio_offset, void *private_data,
- extent_submit_bio_start_t *submit_bio_start);
+blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
+ int mirror_num, unsigned long bio_flags,
+ u64 dio_file_offset,
+ extent_submit_bio_start_t *submit_bio_start);
blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
int mirror_num);
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
@@ -131,16 +133,15 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
int btree_lock_page_hook(struct page *page, void *data,
void (*flush_fn)(void *));
int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags);
+int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid);
+int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid);
int __init btrfs_end_io_wq_init(void);
void __cold btrfs_end_io_wq_exit(void);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void btrfs_init_lockdep(void);
void btrfs_set_buffer_lockdep_class(u64 objectid,
struct extent_buffer *eb, int level);
#else
-static inline void btrfs_init_lockdep(void)
-{ }
static inline void btrfs_set_buffer_lockdep_class(u64 objectid,
struct extent_buffer *eb, int level)
{
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 1a8d419d9e1f..1d4c2397d0d6 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -222,7 +222,6 @@ static int btrfs_get_name(struct dentry *parent, char *name,
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- path->leave_spinning = 1;
if (ino == BTRFS_FIRST_FREE_OBJECTID) {
key.objectid = BTRFS_I(inode)->root->root_key.objectid;
diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h
index 9800a8306368..04083ee5ae6e 100644
--- a/fs/btrfs/extent-io-tree.h
+++ b/fs/btrfs/extent-io-tree.h
@@ -21,10 +21,24 @@ struct io_failure_record;
#define EXTENT_NORESERVE (1U << 11)
#define EXTENT_QGROUP_RESERVED (1U << 12)
#define EXTENT_CLEAR_DATA_RESV (1U << 13)
+/*
+ * Must be cleared only during ordered extent completion or on error paths if we
+ * did not manage to submit bios and create the ordered extents for the range.
+ * Should not be cleared during page release and page invalidation (if there is
+ * an ordered extent in flight); that is left for the ordered extent completion.
+ */
#define EXTENT_DELALLOC_NEW (1U << 14)
+/*
+ * When an ordered extent successfully completes for a region marked as a new
+ * delalloc range, use this flag when clearing the range to indicate that the
+ * VFS' inode number of bytes should be incremented and the inode's new
+ * delalloc bytes decremented, in an atomic way to prevent races with stat(2).
+ */
+#define EXTENT_ADD_INODE_BYTES (1U << 15)
#define EXTENT_DO_ACCOUNTING (EXTENT_CLEAR_META_RESV | \
EXTENT_CLEAR_DATA_RESV)
-#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING)
+#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | \
+ EXTENT_ADD_INODE_BYTES)
/*
* Redefined bits above which are used only in the device allocation tree,
@@ -73,7 +87,7 @@ struct extent_state {
/* ADD NEW ELEMENTS AFTER THIS */
wait_queue_head_t wq;
refcount_t refs;
- unsigned state;
+ u32 state;
struct io_failure_record *failrec;
@@ -105,19 +119,18 @@ void __cold extent_io_exit(void);
u64 count_range_bits(struct extent_io_tree *tree,
u64 *start, u64 search_end,
- u64 max_bytes, unsigned bits, int contig);
+ u64 max_bytes, u32 bits, int contig);
void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, int filled,
- struct extent_state *cached_state);
+ u32 bits, int filled, struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, struct extent_changeset *changeset);
+ u32 bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, int wake, int delete,
+ u32 bits, int wake, int delete,
struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, int wake, int delete,
+ u32 bits, int wake, int delete,
struct extent_state **cached, gfp_t mask,
struct extent_changeset *changeset);
@@ -141,7 +154,7 @@ static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
}
static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
- u64 end, unsigned bits)
+ u64 end, u32 bits)
{
int wake = 0;
@@ -152,17 +165,19 @@ static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
}
int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, struct extent_changeset *changeset);
+ u32 bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, u64 *failed_start,
- struct extent_state **cached_state, gfp_t mask);
+ u32 bits, unsigned exclusive_bits, u64 *failed_start,
+ struct extent_state **cached_state, gfp_t mask,
+ struct extent_changeset *changeset);
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits);
+ u32 bits);
static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
- u64 end, unsigned bits)
+ u64 end, u32 bits)
{
- return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
+ return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
+ NULL);
}
static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
@@ -175,8 +190,8 @@ static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
u64 end, gfp_t mask)
{
- return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
- NULL, mask);
+ return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL, NULL,
+ mask, NULL);
}
static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
@@ -188,16 +203,16 @@ static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
}
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, unsigned clear_bits,
+ u32 bits, u32 clear_bits,
struct extent_state **cached_state);
static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
- u64 end, unsigned int extra_bits,
+ u64 end, u32 extra_bits,
struct extent_state **cached_state)
{
return set_extent_bit(tree, start, end,
EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
- NULL, cached_state, GFP_NOFS);
+ 0, NULL, cached_state, GFP_NOFS, NULL);
}
static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
@@ -205,30 +220,30 @@ static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
{
return set_extent_bit(tree, start, end,
EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
- NULL, cached_state, GFP_NOFS);
+ 0, NULL, cached_state, GFP_NOFS, NULL);
}
static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
u64 end)
{
- return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
- GFP_NOFS);
+ return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL, NULL,
+ GFP_NOFS, NULL);
}
static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
u64 end, struct extent_state **cached_state, gfp_t mask)
{
- return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
- cached_state, mask);
+ return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
+ cached_state, mask, NULL);
}
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, unsigned bits,
+ u64 *start_ret, u64 *end_ret, u32 bits,
struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, unsigned bits);
+ u64 *start_ret, u64 *end_ret, u32 bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, unsigned bits);
+ u64 *start_ret, u64 *end_ret, u32 bits);
int extent_invalidatepage(struct extent_io_tree *tree,
struct page *page, unsigned long offset);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
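
The exclusive_bits argument added to set_extent_bit() above carries the range-locking contract used by lock_extent_bits()/try_lock_extent() in the extent_io.c hunks further down: when the bit being set is also passed as exclusive, an overlapping range fails with -EEXIST and reports where the conflict begins so the caller can wait and retry from there. A deliberately simplified, self-contained toy of that contract (the real code locks the leading part of the range before failing; model_* names are illustrative):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy "io tree": one lock flag per sector over a tiny range. */
#define MODEL_SECTORS 64
static bool model_locked[MODEL_SECTORS];

/*
 * Set the lock on [start, end]. With the bit treated as exclusive, an
 * already-locked sector aborts the attempt with -EEXIST and is reported
 * through @failed_start, mirroring set_extent_bit()'s contract.
 */
static int model_lock_range(unsigned int start, unsigned int end,
			    unsigned int *failed_start)
{
	for (unsigned int i = start; i <= end; i++) {
		if (model_locked[i]) {
			*failed_start = i;
			return -EEXIST;
		}
	}
	for (unsigned int i = start; i <= end; i++)
		model_locked[i] = true;
	return 0;
}

int main(void)
{
	unsigned int failed;

	model_lock_range(10, 20, &failed);
	if (model_lock_range(15, 30, &failed) == -EEXIST)
		printf("conflict starts at sector %u\n", failed); /* 15 */
	return 0;
}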
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 5fd60b13f4f8..56ea380f5a17 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1465,7 +1465,6 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- path->leave_spinning = 1;
/* this will setup the path even if it fails to insert the back ref */
ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
parent, root_objectid, owner,
@@ -1489,7 +1488,6 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
- path->leave_spinning = 1;
/* now insert the actual backref */
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
BUG_ON(refs_to_add != 1);
@@ -1605,7 +1603,6 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
}
again:
- path->leave_spinning = 1;
ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 1);
if (ret < 0) {
err = ret;
@@ -2133,25 +2130,6 @@ static u64 find_middle(struct rb_root *root)
#endif
/*
- * Takes the number of bytes to be csumm'ed and figures out how many leaves it
- * would require to store the csums for that many bytes.
- */
-u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
-{
- u64 csum_size;
- u64 num_csums_per_leaf;
- u64 num_csums;
-
- csum_size = BTRFS_MAX_ITEM_SIZE(fs_info);
- num_csums_per_leaf = div64_u64(csum_size,
- (u64)btrfs_super_csum_size(fs_info->super_copy));
- num_csums = div64_u64(csum_bytes, fs_info->sectorsize);
- num_csums += num_csums_per_leaf - 1;
- num_csums = div64_u64(num_csums, num_csums_per_leaf);
- return num_csums;
-}
-
-/*
* this starts processing the delayed reference count updates and
* extent insertions we have queued up so far. count can be
* 0, which means to process everything in the tree at the start
@@ -2663,6 +2641,11 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
BUG_ON(!btrfs_block_group_done(block_group));
ret = btrfs_remove_free_space(block_group, start, num_bytes);
} else {
+ /*
+ * We must wait for v1 caching to finish, otherwise we may not
+ * remove our space.
+ */
+ btrfs_wait_space_cache_v1_finished(block_group, caching_ctl);
mutex_lock(&caching_ctl->mutex);
if (start >= caching_ctl->progress) {
@@ -2730,31 +2713,6 @@ btrfs_inc_block_group_reservations(struct btrfs_block_group *bg)
atomic_inc(&bg->reservations);
}
-void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info)
-{
- struct btrfs_caching_control *next;
- struct btrfs_caching_control *caching_ctl;
- struct btrfs_block_group *cache;
-
- down_write(&fs_info->commit_root_sem);
-
- list_for_each_entry_safe(caching_ctl, next,
- &fs_info->caching_block_groups, list) {
- cache = caching_ctl->block_group;
- if (btrfs_block_group_done(cache)) {
- cache->last_byte_to_unpin = (u64)-1;
- list_del_init(&caching_ctl->list);
- btrfs_put_caching_control(caching_ctl);
- } else {
- cache->last_byte_to_unpin = caching_ctl->progress;
- }
- }
-
- up_write(&fs_info->commit_root_sem);
-
- btrfs_update_global_block_rsv(fs_info);
-}
-
/*
* Returns the free cluster for the given space info and sets empty_cluster to
* what it should be based on the mount options.
@@ -2816,11 +2774,13 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
len = cache->start + cache->length - start;
len = min(len, end + 1 - start);
- if (start < cache->last_byte_to_unpin) {
- len = min(len, cache->last_byte_to_unpin - start);
- if (return_free_space)
- btrfs_add_free_space(cache, start, len);
+ down_read(&fs_info->commit_root_sem);
+ if (start < cache->last_byte_to_unpin && return_free_space) {
+ u64 add_len = min(len, cache->last_byte_to_unpin - start);
+
+ btrfs_add_free_space(cache, start, add_len);
}
+ up_read(&fs_info->commit_root_sem);
start += len;
total_unpinned += len;
@@ -3040,8 +3000,6 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- path->leave_spinning = 1;
-
is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
if (!is_data && refs_to_drop != 1) {
@@ -3106,7 +3064,6 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
goto out;
}
btrfs_release_path(path);
- path->leave_spinning = 1;
/* Slow path to locate EXTENT/METADATA_ITEM */
key.objectid = bytenr;
@@ -4448,7 +4405,6 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
ins, size);
if (ret) {
@@ -4533,7 +4489,6 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
&extent_key, size);
if (ret) {
@@ -4662,7 +4617,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *buf;
- buf = btrfs_find_create_tree_block(fs_info, bytenr);
+ buf = btrfs_find_create_tree_block(fs_info, bytenr, owner, level);
if (IS_ERR(buf))
return buf;
@@ -4679,12 +4634,16 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
return ERR_PTR(-EUCLEAN);
}
+ /*
+ * This needs to stay, because we could allocate a freed block from an
+ * old tree into a new tree, so we need to make sure this new block is
+ * set to the appropriate level and owner.
+ */
btrfs_set_buffer_lockdep_class(owner, buf, level);
__btrfs_tree_lock(buf, nest);
btrfs_clean_tree_block(buf);
clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
- btrfs_set_lock_blocking_write(buf);
set_extent_buffer_uptodate(buf);
memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header));
@@ -4905,7 +4864,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
continue;
}
reada:
- readahead_tree_block(fs_info, bytenr);
+ btrfs_readahead_node_child(eb, slot);
nread++;
}
wc->reada_slot = slot;
@@ -5064,16 +5023,13 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
next = find_extent_buffer(fs_info, bytenr);
if (!next) {
- next = btrfs_find_create_tree_block(fs_info, bytenr);
+ next = btrfs_find_create_tree_block(fs_info, bytenr,
+ root->root_key.objectid, level - 1);
if (IS_ERR(next))
return PTR_ERR(next);
-
- btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
- level - 1);
reada = 1;
}
btrfs_tree_lock(next);
- btrfs_set_lock_blocking_write(next);
ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
&wc->refs[level - 1],
@@ -5124,8 +5080,8 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
if (!next) {
if (reada && level == 1)
reada_walk_down(trans, root, wc, path);
- next = read_tree_block(fs_info, bytenr, generation, level - 1,
- &first_key);
+ next = read_tree_block(fs_info, bytenr, root->root_key.objectid,
+ generation, level - 1, &first_key);
if (IS_ERR(next)) {
return PTR_ERR(next);
} else if (!extent_buffer_uptodate(next)) {
@@ -5133,7 +5089,6 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
return -EIO;
}
btrfs_tree_lock(next);
- btrfs_set_lock_blocking_write(next);
}
level--;
@@ -5145,7 +5100,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
}
path->nodes[level] = next;
path->slots[level] = 0;
- path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+ path->locks[level] = BTRFS_WRITE_LOCK;
wc->level = level;
if (wc->level == 1)
wc->reada_slot = 0;
@@ -5273,8 +5228,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
if (!path->locks[level]) {
BUG_ON(level == 0);
btrfs_tree_lock(eb);
- btrfs_set_lock_blocking_write(eb);
- path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+ path->locks[level] = BTRFS_WRITE_LOCK;
ret = btrfs_lookup_extent_info(trans, fs_info,
eb->start, level, 1,
@@ -5317,8 +5271,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
if (!path->locks[level] &&
btrfs_header_generation(eb) == trans->transid) {
btrfs_tree_lock(eb);
- btrfs_set_lock_blocking_write(eb);
- path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+ path->locks[level] = BTRFS_WRITE_LOCK;
}
btrfs_clean_tree_block(eb);
}
@@ -5486,9 +5439,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
level = btrfs_header_level(root->node);
path->nodes[level] = btrfs_lock_root_node(root);
- btrfs_set_lock_blocking_write(path->nodes[level]);
path->slots[level] = 0;
- path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+ path->locks[level] = BTRFS_WRITE_LOCK;
memset(&wc->update_progress, 0,
sizeof(wc->update_progress));
} else {
@@ -5496,7 +5448,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
memcpy(&wc->update_progress, &key,
sizeof(wc->update_progress));
- level = root_item->drop_level;
+ level = btrfs_root_drop_level(root_item);
BUG_ON(level == 0);
path->lowest_level = level;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -5516,8 +5468,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
level = btrfs_header_level(root->node);
while (1) {
btrfs_tree_lock(path->nodes[level]);
- btrfs_set_lock_blocking_write(path->nodes[level]);
- path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+ path->locks[level] = BTRFS_WRITE_LOCK;
ret = btrfs_lookup_extent_info(trans, fs_info,
path->nodes[level]->start,
@@ -5529,7 +5480,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
}
BUG_ON(wc->refs[level] == 0);
- if (level == root_item->drop_level)
+ if (level == btrfs_root_drop_level(root_item))
break;
btrfs_tree_unlock(path->nodes[level]);
@@ -5574,7 +5525,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
}
btrfs_cpu_key_to_disk(&root_item->drop_progress,
&wc->drop_progress);
- root_item->drop_level = wc->drop_level;
+ btrfs_set_root_drop_level(root_item, wc->drop_level);
BUG_ON(wc->level == 0);
if (btrfs_should_end_transaction(trans) ||
@@ -5704,7 +5655,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
level = btrfs_header_level(node);
path->nodes[level] = node;
path->slots[level] = 0;
- path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+ path->locks[level] = BTRFS_WRITE_LOCK;
wc->refs[parent_level] = 1;
wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 60f5f68d892d..6e3b72e63e42 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -142,7 +142,7 @@ struct extent_page_data {
unsigned int sync_io:1;
};
-static int add_extent_changeset(struct extent_state *state, unsigned bits,
+static int add_extent_changeset(struct extent_state *state, u32 bits,
struct extent_changeset *changeset,
int set)
{
@@ -530,7 +530,7 @@ static void merge_state(struct extent_io_tree *tree,
}
static void set_state_bits(struct extent_io_tree *tree,
- struct extent_state *state, unsigned *bits,
+ struct extent_state *state, u32 *bits,
struct extent_changeset *changeset);
/*
@@ -547,7 +547,7 @@ static int insert_state(struct extent_io_tree *tree,
struct extent_state *state, u64 start, u64 end,
struct rb_node ***p,
struct rb_node **parent,
- unsigned *bits, struct extent_changeset *changeset)
+ u32 *bits, struct extent_changeset *changeset)
{
struct rb_node *node;
@@ -628,11 +628,11 @@ static struct extent_state *next_state(struct extent_state *state)
*/
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
struct extent_state *state,
- unsigned *bits, int wake,
+ u32 *bits, int wake,
struct extent_changeset *changeset)
{
struct extent_state *next;
- unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;
+ u32 bits_to_clear = *bits & ~EXTENT_CTLBITS;
int ret;
if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
@@ -695,9 +695,9 @@ static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
* This takes the tree lock, and returns 0 on success and < 0 on error.
*/
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, int wake, int delete,
- struct extent_state **cached_state,
- gfp_t mask, struct extent_changeset *changeset)
+ u32 bits, int wake, int delete,
+ struct extent_state **cached_state,
+ gfp_t mask, struct extent_changeset *changeset)
{
struct extent_state *state;
struct extent_state *cached;
@@ -868,7 +868,7 @@ static void wait_on_state(struct extent_io_tree *tree,
* The tree lock is taken by this function
*/
static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned long bits)
+ u32 bits)
{
struct extent_state *state;
struct rb_node *node;
@@ -915,9 +915,9 @@ out:
static void set_state_bits(struct extent_io_tree *tree,
struct extent_state *state,
- unsigned *bits, struct extent_changeset *changeset)
+ u32 *bits, struct extent_changeset *changeset)
{
- unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
+ u32 bits_to_set = *bits & ~EXTENT_CTLBITS;
int ret;
if (tree->private_data && is_data_inode(tree->private_data))
@@ -961,12 +961,10 @@ static void cache_state(struct extent_state *state,
*
* [start, end] is inclusive. This takes the tree lock.
*/
-
-static int __must_check
-__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, unsigned exclusive_bits,
- u64 *failed_start, struct extent_state **cached_state,
- gfp_t mask, struct extent_changeset *changeset)
+int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
+ u32 exclusive_bits, u64 *failed_start,
+ struct extent_state **cached_state, gfp_t mask,
+ struct extent_changeset *changeset)
{
struct extent_state *state;
struct extent_state *prealloc = NULL;
@@ -980,6 +978,10 @@ __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
btrfs_debug_check_extent_io_range(tree, start, end);
trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);
+ if (exclusive_bits)
+ ASSERT(failed_start);
+ else
+ ASSERT(failed_start == NULL);
again:
if (!prealloc && gfpflags_allow_blocking(mask)) {
/*
@@ -1179,15 +1181,6 @@ out:
}
-int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, u64 * failed_start,
- struct extent_state **cached_state, gfp_t mask)
-{
- return __set_extent_bit(tree, start, end, bits, 0, failed_start,
- cached_state, mask, NULL);
-}
-
-
/**
* convert_extent_bit - convert all bits in a given range from one bit to
* another
@@ -1207,7 +1200,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
* All allocations are done with GFP_NOFS.
*/
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, unsigned clear_bits,
+ u32 bits, u32 clear_bits,
struct extent_state **cached_state)
{
struct extent_state *state;
@@ -1408,7 +1401,7 @@ out:
/* wrappers around set/clear extent bit */
int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, struct extent_changeset *changeset)
+ u32 bits, struct extent_changeset *changeset)
{
/*
* We don't support EXTENT_LOCKED yet, as current changeset will
@@ -1418,19 +1411,19 @@ int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
*/
BUG_ON(bits & EXTENT_LOCKED);
- return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
- changeset);
+ return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
+ changeset);
}
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits)
+ u32 bits)
{
- return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
- GFP_NOWAIT, NULL);
+ return set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
+ GFP_NOWAIT, NULL);
}
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, int wake, int delete,
+ u32 bits, int wake, int delete,
struct extent_state **cached)
{
return __clear_extent_bit(tree, start, end, bits, wake, delete,
@@ -1438,7 +1431,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
}
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, struct extent_changeset *changeset)
+ u32 bits, struct extent_changeset *changeset)
{
/*
* Don't support EXTENT_LOCKED case, same reason as
@@ -1461,9 +1454,9 @@ int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
u64 failed_start;
while (1) {
- err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
- EXTENT_LOCKED, &failed_start,
- cached_state, GFP_NOFS, NULL);
+ err = set_extent_bit(tree, start, end, EXTENT_LOCKED,
+ EXTENT_LOCKED, &failed_start,
+ cached_state, GFP_NOFS, NULL);
if (err == -EEXIST) {
wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
start = failed_start;
@@ -1479,8 +1472,8 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
int err;
u64 failed_start;
- err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
- &failed_start, NULL, GFP_NOFS, NULL);
+ err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
+ &failed_start, NULL, GFP_NOFS, NULL);
if (err == -EEXIST) {
if (failed_start > start)
clear_extent_bit(tree, start, failed_start - 1,
@@ -1526,8 +1519,7 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
* nothing was found after 'start'
*/
static struct extent_state *
-find_first_extent_bit_state(struct extent_io_tree *tree,
- u64 start, unsigned bits)
+find_first_extent_bit_state(struct extent_io_tree *tree, u64 start, u32 bits)
{
struct rb_node *node;
struct extent_state *state;
@@ -1554,14 +1546,15 @@ out:
}
/*
- * find the first offset in the io tree with 'bits' set. zero is
- * returned if we find something, and *start_ret and *end_ret are
- * set to reflect the state struct that was found.
+ * Find the first offset in the io tree with one or more @bits set.
*
- * If nothing was found, 1 is returned. If found something, return 0.
+ * Note: If there are multiple bits set in @bits, any of them will match.
+ *
+ * Return 0 if we find something, and update @start_ret and @end_ret.
+ * Return 1 if we found nothing.
*/
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, unsigned bits,
+ u64 *start_ret, u64 *end_ret, u32 bits,
struct extent_state **cached_state)
{
struct extent_state *state;
@@ -1612,7 +1605,7 @@ out:
* returned will be the full contiguous area with the bits set.
*/
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, unsigned bits)
+ u64 *start_ret, u64 *end_ret, u32 bits)
{
struct extent_state *state;
int ret = 1;
@@ -1649,7 +1642,7 @@ int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
* trim @end_ret to the appropriate size.
*/
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, unsigned bits)
+ u64 *start_ret, u64 *end_ret, u32 bits)
{
struct extent_state *state;
struct rb_node *node, *prev = NULL, *next;
@@ -1946,7 +1939,7 @@ static int __process_pages_contig(struct address_space *mapping,
unsigned long page_ops, pgoff_t *index_ret)
{
unsigned long nr_pages = end_index - start_index + 1;
- unsigned long pages_locked = 0;
+ unsigned long pages_processed = 0;
pgoff_t index = start_index;
struct page *pages[16];
unsigned ret;
@@ -1981,7 +1974,7 @@ static int __process_pages_contig(struct address_space *mapping,
if (locked_page && pages[i] == locked_page) {
put_page(pages[i]);
- pages_locked++;
+ pages_processed++;
continue;
}
if (page_ops & PAGE_CLEAR_DIRTY)
@@ -2006,7 +1999,7 @@ static int __process_pages_contig(struct address_space *mapping,
}
}
put_page(pages[i]);
- pages_locked++;
+ pages_processed++;
}
nr_pages -= ret;
index += ret;
@@ -2014,14 +2007,13 @@ static int __process_pages_contig(struct address_space *mapping,
}
out:
if (err && index_ret)
- *index_ret = start_index + pages_locked - 1;
+ *index_ret = start_index + pages_processed - 1;
return err;
}
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
struct page *locked_page,
- unsigned clear_bits,
- unsigned long page_ops)
+ u32 clear_bits, unsigned long page_ops)
{
clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL);
@@ -2037,7 +2029,7 @@ void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
*/
u64 count_range_bits(struct extent_io_tree *tree,
u64 *start, u64 search_end, u64 max_bytes,
- unsigned bits, int contig)
+ u32 bits, int contig)
{
struct rb_node *node;
struct extent_state *state;
@@ -2157,7 +2149,7 @@ out:
* range is found set.
*/
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, int filled, struct extent_state *cached)
+ u32 bits, int filled, struct extent_state *cached)
{
struct extent_state *state = NULL;
struct rb_node *node;
@@ -2642,7 +2634,7 @@ static bool btrfs_io_needs_validation(struct inode *inode, struct bio *bio)
}
blk_status_t btrfs_submit_read_repair(struct inode *inode,
- struct bio *failed_bio, u64 phy_offset,
+ struct bio *failed_bio, u32 bio_offset,
struct page *page, unsigned int pgoff,
u64 start, u64 end, int failed_mirror,
submit_bio_hook_t *submit_bio_hook)
@@ -2652,7 +2644,7 @@ blk_status_t btrfs_submit_read_repair(struct inode *inode,
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
struct btrfs_io_bio *failed_io_bio = btrfs_io_bio(failed_bio);
- const int icsum = phy_offset >> inode->i_sb->s_blocksize_bits;
+ const int icsum = bio_offset >> fs_info->sectorsize_bits;
bool need_validation;
struct bio *repair_bio;
struct btrfs_io_bio *repair_io_bio;
@@ -2685,7 +2677,7 @@ blk_status_t btrfs_submit_read_repair(struct inode *inode,
repair_bio->bi_private = failed_bio->bi_private;
if (failed_io_bio->csum) {
- const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
+ const u32 csum_size = fs_info->csum_size;
repair_io_bio->csum = repair_io_bio->csum_inline;
memcpy(repair_io_bio->csum,
@@ -2775,16 +2767,88 @@ static void end_bio_extent_writepage(struct bio *bio)
bio_put(bio);
}
-static void
-endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
- int uptodate)
+/*
+ * Record previously processed extent range
+ *
+ * Used by endio_readpage_release_extent() to batch a full contiguous extent
+ * range, reducing the number of extent io tree operations.
+ */
+struct processed_extent {
+ struct btrfs_inode *inode;
+ /* Start of the range in @inode */
+ u64 start;
+ /* End of the range in @inode */
+ u64 end;
+ bool uptodate;
+};
+
+/*
+ * Try to release processed extent range
+ *
+ * May not release the extent range right now if the current range is
+ * contiguous to processed extent.
+ *
+ * Will release processed extent when any of @inode, @uptodate, the range is
+ * no longer contiguous to the processed range.
+ *
+ * Passing @inode == NULL will force processed extent to be released.
+ */
+static void endio_readpage_release_extent(struct processed_extent *processed,
+ struct btrfs_inode *inode, u64 start, u64 end,
+ bool uptodate)
{
struct extent_state *cached = NULL;
- u64 end = start + len - 1;
+ struct extent_io_tree *tree;
+
+ /* The first extent, initialize @processed */
+ if (!processed->inode)
+ goto update;
- if (uptodate && tree->track_uptodate)
- set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
- unlock_extent_cached_atomic(tree, start, end, &cached);
+ /*
+ * Contiguous with the processed extent, just update the end.
+ *
+ * Several things to notice:
+ *
+ * - bios can be merged as long as the on-disk bytenr is contiguous
+ *   This means we can have pages belonging to other inodes, thus we need
+ *   to check whether the inode still matches.
+ * - a bvec can contain a range beyond the current page for multi-page bvecs
+ *   Thus we need the processed->end + 1 >= start check
+ */
+ if (processed->inode == inode && processed->uptodate == uptodate &&
+ processed->end + 1 >= start && end >= processed->end) {
+ processed->end = end;
+ return;
+ }
+
+ tree = &processed->inode->io_tree;
+ /*
+ * The current range is not contiguous with the processed range, so
+ * release the processed range now.
+ */
+ if (processed->uptodate && tree->track_uptodate)
+ set_extent_uptodate(tree, processed->start, processed->end,
+ &cached, GFP_ATOMIC);
+ unlock_extent_cached_atomic(tree, processed->start, processed->end,
+ &cached);
+
+update:
+ /* Update processed to current range */
+ processed->inode = inode;
+ processed->start = start;
+ processed->end = end;
+ processed->uptodate = uptodate;
+}
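/*
 * A self-contained userspace model of the batching done by
 * endio_readpage_release_extent() above: ranges merge while they stay
 * contiguous and agree on the uptodate status; the accumulated batch is
 * flushed only on a discontinuity, or at the end via key == 0. An int
 * key stands in for the inode pointer; model_* names are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct model_processed {
	int key;		/* 0 means "empty", like inode == NULL */
	uint64_t start;
	uint64_t end;
	bool uptodate;
};

static void model_release_extent(struct model_processed *p, int key,
				 uint64_t start, uint64_t end, bool uptodate)
{
	/* Same merge test as the kernel code above */
	if (p->key && p->key == key && p->uptodate == uptodate &&
	    p->end + 1 >= start && end >= p->end) {
		p->end = end;
		return;
	}
	if (p->key)		/* flush the previous batch */
		printf("release [%llu, %llu] uptodate=%d\n",
		       (unsigned long long)p->start,
		       (unsigned long long)p->end, (int)p->uptodate);
	p->key = key;
	p->start = start;
	p->end = end;
	p->uptodate = uptodate;
}

int main(void)
{
	struct model_processed p = { 0 };

	model_release_extent(&p, 1, 0, 4095, true);
	model_release_extent(&p, 1, 4096, 8191, true);	 /* merges */
	model_release_extent(&p, 1, 16384, 20479, true); /* flushes first batch */
	model_release_extent(&p, 0, 0, 0, false);	 /* final flush */
	return 0;
}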
+
+static void endio_readpage_update_page_status(struct page *page, bool uptodate)
+{
+ if (uptodate) {
+ SetPageUptodate(page);
+ } else {
+ ClearPageUptodate(page);
+ SetPageError(page);
+ }
+ unlock_page(page);
}
/*
@@ -2804,12 +2868,12 @@ static void end_bio_extent_readpage(struct bio *bio)
int uptodate = !bio->bi_status;
struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
struct extent_io_tree *tree, *failure_tree;
- u64 offset = 0;
- u64 start;
- u64 end;
- u64 len;
- u64 extent_start = 0;
- u64 extent_len = 0;
+ struct processed_extent processed = { 0 };
+ /*
+ * The offset to the beginning of the bio; since one bio can never be
+ * larger than UINT_MAX, u32 is enough here.
+ */
+ u32 bio_offset = 0;
int mirror;
int ret;
struct bvec_iter_all iter_all;
@@ -2819,42 +2883,48 @@ static void end_bio_extent_readpage(struct bio *bio)
struct page *page = bvec->bv_page;
struct inode *inode = page->mapping->host;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ const u32 sectorsize = fs_info->sectorsize;
+ u64 start;
+ u64 end;
+ u32 len;
btrfs_debug(fs_info,
"end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
- (u64)bio->bi_iter.bi_sector, bio->bi_status,
+ bio->bi_iter.bi_sector, bio->bi_status,
io_bio->mirror_num);
tree = &BTRFS_I(inode)->io_tree;
failure_tree = &BTRFS_I(inode)->io_failure_tree;
- /* We always issue full-page reads, but if some block
- * in a page fails to read, blk_update_request() will
- * advance bv_offset and adjust bv_len to compensate.
- * Print a warning for nonzero offsets, and an error
- * if they don't add up to a full page. */
- if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
- if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
- btrfs_err(fs_info,
- "partial page read in btrfs with offset %u and length %u",
- bvec->bv_offset, bvec->bv_len);
- else
- btrfs_info(fs_info,
- "incomplete page read in btrfs with offset %u and length %u",
- bvec->bv_offset, bvec->bv_len);
- }
-
- start = page_offset(page);
- end = start + bvec->bv_offset + bvec->bv_len - 1;
+ /*
+ * We always issue full-sector reads, but if some block in a
+ * page fails to read, blk_update_request() will advance
+ * bv_offset and adjust bv_len to compensate. Print a warning
+ * for unaligned offsets, and an error if they don't add up to
+ * a full sector.
+ */
+ if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
+ btrfs_err(fs_info,
+ "partial page read in btrfs with offset %u and length %u",
+ bvec->bv_offset, bvec->bv_len);
+ else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len,
+ sectorsize))
+ btrfs_info(fs_info,
+ "incomplete page read with offset %u and length %u",
+ bvec->bv_offset, bvec->bv_len);
+
+ start = page_offset(page) + bvec->bv_offset;
+ end = start + bvec->bv_len - 1;
len = bvec->bv_len;
mirror = io_bio->mirror_num;
if (likely(uptodate)) {
if (is_data_inode(inode))
- ret = btrfs_verify_data_csum(io_bio, offset, page,
- start, end, mirror);
+ ret = btrfs_verify_data_csum(io_bio,
+ bio_offset, page, start, end,
+ mirror);
else
ret = btrfs_validate_metadata_buffer(io_bio,
- offset, page, start, end, mirror);
+ page, start, end, mirror);
if (ret)
uptodate = 0;
else
@@ -2879,12 +2949,14 @@ static void end_bio_extent_readpage(struct bio *bio)
* If it can't handle the error it will return -EIO and
* we remain responsible for that page.
*/
- if (!btrfs_submit_read_repair(inode, bio, offset, page,
+ if (!btrfs_submit_read_repair(inode, bio, bio_offset,
+ page,
start - page_offset(page),
start, end, mirror,
btrfs_submit_data_bio)) {
uptodate = !bio->bi_status;
- offset += len;
+ ASSERT(bio_offset + len > bio_offset);
+ bio_offset += len;
continue;
}
} else {
@@ -2908,40 +2980,17 @@ readpage_ok:
off = offset_in_page(i_size);
if (page->index == end_index && off)
zero_user_segment(page, off, PAGE_SIZE);
- SetPageUptodate(page);
- } else {
- ClearPageUptodate(page);
- SetPageError(page);
- }
- unlock_page(page);
- offset += len;
-
- if (unlikely(!uptodate)) {
- if (extent_len) {
- endio_readpage_release_extent(tree,
- extent_start,
- extent_len, 1);
- extent_start = 0;
- extent_len = 0;
- }
- endio_readpage_release_extent(tree, start,
- end - start + 1, 0);
- } else if (!extent_len) {
- extent_start = start;
- extent_len = end + 1 - start;
- } else if (extent_start + extent_len == start) {
- extent_len += end + 1 - start;
- } else {
- endio_readpage_release_extent(tree, extent_start,
- extent_len, uptodate);
- extent_start = start;
- extent_len = end + 1 - start;
}
- }
+ ASSERT(bio_offset + len > bio_offset);
+ bio_offset += len;
- if (extent_len)
- endio_readpage_release_extent(tree, extent_start, extent_len,
- uptodate);
+ /* Update page status and unlock */
+ endio_readpage_update_page_status(page, uptodate);
+ endio_readpage_release_extent(&processed, BTRFS_I(inode),
+ start, end, uptodate);
+ }
+ /* Release the last extent */
+ endio_readpage_release_extent(&processed, NULL, 0, 0, false);
btrfs_io_bio_free_csum(io_bio);
bio_put(bio);
}
@@ -3038,7 +3087,7 @@ static int submit_extent_page(unsigned int opf,
{
int ret = 0;
struct bio *bio;
- size_t page_size = min_t(size_t, size, PAGE_SIZE);
+ size_t io_size = min_t(size_t, size, PAGE_SIZE);
sector_t sector = offset >> 9;
struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;
@@ -3054,12 +3103,12 @@ static int submit_extent_page(unsigned int opf,
else
contig = bio_end_sector(bio) == sector;
- if (btrfs_bio_fits_in_stripe(page, page_size, bio, bio_flags))
+ if (btrfs_bio_fits_in_stripe(page, io_size, bio, bio_flags))
can_merge = false;
if (prev_bio_flags != bio_flags || !contig || !can_merge ||
force_bio_submit ||
- bio_add_page(bio, page, page_size, pg_offset) < page_size) {
+ bio_add_page(bio, page, io_size, pg_offset) < io_size) {
ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
if (ret < 0) {
*bio_ret = NULL;
@@ -3068,13 +3117,13 @@ static int submit_extent_page(unsigned int opf,
bio = NULL;
} else {
if (wbc)
- wbc_account_cgroup_owner(wbc, page, page_size);
+ wbc_account_cgroup_owner(wbc, page, io_size);
return 0;
}
}
bio = btrfs_bio_alloc(offset);
- bio_add_page(bio, page, page_size, pg_offset);
+ bio_add_page(bio, page, io_size, pg_offset);
bio->bi_end_io = end_io_func;
bio->bi_private = tree;
bio->bi_write_hint = page->mapping->host->i_write_hint;
@@ -3085,7 +3134,7 @@ static int submit_extent_page(unsigned int opf,
bdev = BTRFS_I(page->mapping->host)->root->fs_info->fs_devices->latest_bdev;
bio_set_dev(bio, bdev);
wbc_init_bio(wbc, bio);
- wbc_account_cgroup_owner(wbc, page, page_size);
+ wbc_account_cgroup_owner(wbc, page, io_size);
}
*bio_ret = bio;
@@ -3096,6 +3145,15 @@ static int submit_extent_page(unsigned int opf,
static void attach_extent_buffer_page(struct extent_buffer *eb,
struct page *page)
{
+ /*
+ * If the page is mapped to the btree inode, we should hold the private
+ * lock to prevent races.
+ * For cloned or dummy extent buffers, their pages are not mapped and
+ * will not race with any other ebs.
+ */
+ if (page->mapping)
+ lockdep_assert_held(&page->mapping->private_lock);
+
if (!PagePrivate(page))
attach_page_private(page, eb);
else
@@ -3158,7 +3216,6 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
int nr = 0;
size_t pg_offset = 0;
size_t iosize;
- size_t disk_io_size;
size_t blocksize = inode->i_sb->s_blocksize;
unsigned long this_bio_flag = 0;
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
@@ -3224,13 +3281,10 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
iosize = min(extent_map_end(em) - cur, end - cur + 1);
cur_end = min(extent_map_end(em) - 1, end);
iosize = ALIGN(iosize, blocksize);
- if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
- disk_io_size = em->block_len;
+ if (this_bio_flag & EXTENT_BIO_COMPRESSED)
offset = em->block_start;
- } else {
+ else
offset = em->block_start + extent_offset;
- disk_io_size = iosize;
- }
block_start = em->block_start;
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
block_start = EXTENT_MAP_HOLE;
@@ -3319,7 +3373,7 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
}
ret = submit_extent_page(REQ_OP_READ | read_flags, NULL,
- page, offset, disk_io_size,
+ page, offset, iosize,
pg_offset, bio,
end_bio_extent_readpage, 0,
*bio_flags,
@@ -3656,11 +3710,14 @@ static void end_extent_buffer_writeback(struct extent_buffer *eb)
}
/*
- * Lock eb pages and flush the bio if we can't the locks
+ * Lock extent buffer status and pages for writeback.
+ *
+ * May try to flush write bio if we can't get the lock.
*
- * Return 0 if nothing went wrong
- * Return >0 is same as 0, except bio is not submitted
- * Return <0 if something went wrong, no page is locked
+ * Return 0 if the extent buffer doesn't need to be submitted.
+ * (E.g. the extent buffer is not dirty)
+ * Return >0 if the extent buffer is submitted to a bio.
+ * Return <0 if something went wrong, no page is locked.
*/
static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
struct extent_page_data *epd)
@@ -3930,10 +3987,81 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
return ret;
}
+/*
+ * Submit all page(s) of one extent buffer.
+ *
+ * @page: the page of one extent buffer
+ * @eb_context: to determine if we need to submit this page; if the current
+ * page belongs to this eb, we don't need to submit it
+ *
+ * The caller should pass each page in their bytenr order, and here we use
+ * @eb_context to determine if we have submitted pages of one extent buffer.
+ *
+ * If we have, we just skip until we hit a new page that doesn't belong to
+ * the current @eb_context.
+ *
+ * If not, we submit all the page(s) of the extent buffer.
+ *
+ * Return >0 if we have submitted the extent buffer successfully.
+ * Return 0 if we don't need to submit the page, as it's already submitted by
+ * previous call.
+ * Return <0 for fatal error.
+ */
+static int submit_eb_page(struct page *page, struct writeback_control *wbc,
+ struct extent_page_data *epd,
+ struct extent_buffer **eb_context)
+{
+ struct address_space *mapping = page->mapping;
+ struct extent_buffer *eb;
+ int ret;
+
+ if (!PagePrivate(page))
+ return 0;
+
+ spin_lock(&mapping->private_lock);
+ if (!PagePrivate(page)) {
+ spin_unlock(&mapping->private_lock);
+ return 0;
+ }
+
+ eb = (struct extent_buffer *)page->private;
+
+ /*
+ * Shouldn't happen and normally this would be a BUG_ON but no point
+ * crashing the machine for something we can survive anyway.
+ */
+ if (WARN_ON(!eb)) {
+ spin_unlock(&mapping->private_lock);
+ return 0;
+ }
+
+ if (eb == *eb_context) {
+ spin_unlock(&mapping->private_lock);
+ return 0;
+ }
+ ret = atomic_inc_not_zero(&eb->refs);
+ spin_unlock(&mapping->private_lock);
+ if (!ret)
+ return 0;
+
+ *eb_context = eb;
+
+ ret = lock_extent_buffer_for_io(eb, epd);
+ if (ret <= 0) {
+ free_extent_buffer(eb);
+ return ret;
+ }
+ ret = write_one_eb(eb, wbc, epd);
+ free_extent_buffer(eb);
+ if (ret < 0)
+ return ret;
+ return 1;
+}
+
int btree_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
- struct extent_buffer *eb, *prev_eb = NULL;
+ struct extent_buffer *eb_context = NULL;
struct extent_page_data epd = {
.bio = NULL,
.extent_locked = 0,
@@ -3979,55 +4107,13 @@ retry:
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
- if (!PagePrivate(page))
- continue;
-
- spin_lock(&mapping->private_lock);
- if (!PagePrivate(page)) {
- spin_unlock(&mapping->private_lock);
+ ret = submit_eb_page(page, wbc, &epd, &eb_context);
+ if (ret == 0)
continue;
- }
-
- eb = (struct extent_buffer *)page->private;
-
- /*
- * Shouldn't happen and normally this would be a BUG_ON
- * but no sense in crashing the users box for something
- * we can survive anyway.
- */
- if (WARN_ON(!eb)) {
- spin_unlock(&mapping->private_lock);
- continue;
- }
-
- if (eb == prev_eb) {
- spin_unlock(&mapping->private_lock);
- continue;
- }
-
- ret = atomic_inc_not_zero(&eb->refs);
- spin_unlock(&mapping->private_lock);
- if (!ret)
- continue;
-
- prev_eb = eb;
- ret = lock_extent_buffer_for_io(eb, &epd);
- if (!ret) {
- free_extent_buffer(eb);
- continue;
- } else if (ret < 0) {
- done = 1;
- free_extent_buffer(eb);
- break;
- }
-
- ret = write_one_eb(eb, wbc, &epd);
- if (ret) {
+ if (ret < 0) {
done = 1;
- free_extent_buffer(eb);
break;
}
- free_extent_buffer(eb);
/*
* the filesystem may choose to bump up nr_to_write.
@@ -4048,7 +4134,6 @@ retry:
index = 0;
goto retry;
}
- ASSERT(ret <= 0);
if (ret < 0) {
end_write_bio(&epd, ret);
return ret;
@@ -4382,14 +4467,22 @@ int extent_invalidatepage(struct extent_io_tree *tree,
u64 end = start + PAGE_SIZE - 1;
size_t blocksize = page->mapping->host->i_sb->s_blocksize;
+ /* This function is only called for the btree inode */
+ ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
+
start += ALIGN(offset, blocksize);
if (start > end)
return 0;
lock_extent_bits(tree, start, end, &cached_state);
wait_on_page_writeback(page);
- clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING, 1, 1, &cached_state);
+
+ /*
+ * Currently, for the btree io tree, only EXTENT_LOCKED is utilized,
+ * so here we only need to unlock the extent range to free any
+ * existing extent state.
+ */
+ unlock_extent_cached(tree, start, end, &cached_state);
return 0;
}
@@ -4409,12 +4502,14 @@ static int try_release_extent_state(struct extent_io_tree *tree,
ret = 0;
} else {
/*
- * at this point we can safely clear everything except the
- * locked bit and the nodatasum bit
+ * At this point we can safely clear everything except the
+ * locked bit, the nodatasum bit and the delalloc new bit.
+ * The delalloc new bit will be cleared by ordered extent
+ * completion.
*/
ret = __clear_extent_bit(tree, start, end,
- ~(EXTENT_LOCKED | EXTENT_NODATASUM),
- 0, 0, NULL, mask, NULL);
+ ~(EXTENT_LOCKED | EXTENT_NODATASUM | EXTENT_DELALLOC_NEW),
+ 0, 0, NULL, mask, NULL);
/* if clear_extent_bit failed for enomem reasons,
* we can't allow the release to continue.
@@ -4691,7 +4786,6 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- path->leave_spinning = 1;
roots = ulist_alloc(GFP_KERNEL);
tmp_ulist = ulist_alloc(GFP_KERNEL);
@@ -4950,12 +5044,7 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
eb->len = len;
eb->fs_info = fs_info;
eb->bflags = 0;
- rwlock_init(&eb->lock);
- atomic_set(&eb->blocking_readers, 0);
- eb->blocking_writers = 0;
- eb->lock_recursed = false;
- init_waitqueue_head(&eb->write_lock_wq);
- init_waitqueue_head(&eb->read_lock_wq);
+ init_rwsem(&eb->lock);
btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list,
&fs_info->allocated_ebs);
@@ -4964,19 +5053,7 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
atomic_set(&eb->refs, 1);
atomic_set(&eb->io_pages, 0);
- /*
- * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
- */
- BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
- > MAX_INLINE_EXTENT_BUFFER_SIZE);
- BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
-
-#ifdef CONFIG_BTRFS_DEBUG
- eb->spinning_writers = 0;
- atomic_set(&eb->spinning_readers, 0);
- atomic_set(&eb->read_locks, 0);
- eb->write_locks = 0;
-#endif
+ ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
return eb;
}
@@ -5105,7 +5182,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
rcu_read_lock();
eb = radix_tree_lookup(&fs_info->buffer_radix,
- start >> PAGE_SHIFT);
+ start >> fs_info->sectorsize_bits);
if (eb && atomic_inc_not_zero(&eb->refs)) {
rcu_read_unlock();
/*
@@ -5157,7 +5234,7 @@ again:
}
spin_lock(&fs_info->buffer_lock);
ret = radix_tree_insert(&fs_info->buffer_radix,
- start >> PAGE_SHIFT, eb);
+ start >> fs_info->sectorsize_bits, eb);
spin_unlock(&fs_info->buffer_lock);
radix_tree_preload_end();
if (ret == -EEXIST) {
@@ -5178,7 +5255,7 @@ free_eb:
#endif
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start)
+ u64 start, u64 owner_root, int level)
{
unsigned long len = fs_info->nodesize;
int num_pages;
@@ -5196,6 +5273,14 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
return ERR_PTR(-EINVAL);
}
+ if (fs_info->sectorsize < PAGE_SIZE &&
+ offset_in_page(start) + len > PAGE_SIZE) {
+ btrfs_err(fs_info,
+ "tree block crosses page boundary, start %llu nodesize %lu",
+ start, len);
+ return ERR_PTR(-EINVAL);
+ }
+
eb = find_extent_buffer(fs_info, start);
if (eb)
return eb;
@@ -5203,6 +5288,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
eb = __alloc_extent_buffer(fs_info, start, len);
if (!eb)
return ERR_PTR(-ENOMEM);
+ btrfs_set_buffer_lockdep_class(owner_root, eb, level);
num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++, index++) {
@@ -5231,13 +5317,8 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
}
exists = NULL;
- /*
- * Do this so attach doesn't complain and we need to
- * drop the ref the old guy had.
- */
- ClearPagePrivate(p);
WARN_ON(PageDirty(p));
- put_page(p);
+ detach_page_private(p);
}
attach_extent_buffer_page(eb, p);
spin_unlock(&mapping->private_lock);
@@ -5265,7 +5346,7 @@ again:
spin_lock(&fs_info->buffer_lock);
ret = radix_tree_insert(&fs_info->buffer_radix,
- start >> PAGE_SHIFT, eb);
+ start >> fs_info->sectorsize_bits, eb);
spin_unlock(&fs_info->buffer_lock);
radix_tree_preload_end();
if (ret == -EEXIST) {
@@ -5321,7 +5402,7 @@ static int release_extent_buffer(struct extent_buffer *eb)
spin_lock(&fs_info->buffer_lock);
radix_tree_delete(&fs_info->buffer_radix,
- eb->start >> PAGE_SHIFT);
+ eb->start >> fs_info->sectorsize_bits);
spin_unlock(&fs_info->buffer_lock);
} else {
spin_unlock(&eb->refs_lock);
@@ -5622,12 +5703,12 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
struct page *page;
char *kaddr;
char *dst = (char *)dstv;
- unsigned long i = start >> PAGE_SHIFT;
+ unsigned long i = get_eb_page_index(start);
if (check_eb_range(eb, start, len))
return;
- offset = offset_in_page(start);
+ offset = get_eb_offset_in_page(eb, start);
while (len > 0) {
page = eb->pages[i];
@@ -5652,13 +5733,13 @@ int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
struct page *page;
char *kaddr;
char __user *dst = (char __user *)dstv;
- unsigned long i = start >> PAGE_SHIFT;
+ unsigned long i = get_eb_page_index(start);
int ret = 0;
WARN_ON(start > eb->len);
WARN_ON(start + len > eb->start + eb->len);
- offset = offset_in_page(start);
+ offset = get_eb_offset_in_page(eb, start);
while (len > 0) {
page = eb->pages[i];
@@ -5687,13 +5768,13 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
struct page *page;
char *kaddr;
char *ptr = (char *)ptrv;
- unsigned long i = start >> PAGE_SHIFT;
+ unsigned long i = get_eb_page_index(start);
int ret = 0;
if (check_eb_range(eb, start, len))
return -EINVAL;
- offset = offset_in_page(start);
+ offset = get_eb_offset_in_page(eb, start);
while (len > 0) {
page = eb->pages[i];
@@ -5719,7 +5800,7 @@ void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
char *kaddr;
WARN_ON(!PageUptodate(eb->pages[0]));
- kaddr = page_address(eb->pages[0]);
+ kaddr = page_address(eb->pages[0]) + get_eb_offset_in_page(eb, 0);
memcpy(kaddr + offsetof(struct btrfs_header, chunk_tree_uuid), srcv,
BTRFS_FSID_SIZE);
}
@@ -5729,7 +5810,7 @@ void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv)
char *kaddr;
WARN_ON(!PageUptodate(eb->pages[0]));
- kaddr = page_address(eb->pages[0]);
+ kaddr = page_address(eb->pages[0]) + get_eb_offset_in_page(eb, 0);
memcpy(kaddr + offsetof(struct btrfs_header, fsid), srcv,
BTRFS_FSID_SIZE);
}
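/*
 * The get_eb_offset_in_page()/get_eb_page_index() helpers substituted all
 * over these hunks are defined outside this diff; a sketch of their
 * presumed shape, assuming a subpage tree block never crosses a page
 * boundary (which alloc_extent_buffer() enforces below):
 */
static inline size_t model_eb_offset_in_page(const struct extent_buffer *eb,
					     unsigned long offset)
{
	/*
	 * For sectorsize == PAGE_SIZE, eb->start is page aligned and this
	 * reduces to offset_in_page(offset). For subpage, the eb's offset
	 * inside its single page must be added first.
	 */
	return offset_in_page(eb->start + offset);
}

static inline unsigned long model_eb_page_index(unsigned long offset)
{
	/* A subpage eb fits in one page, so the index is always 0 there. */
	return offset >> PAGE_SHIFT;
}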
@@ -5742,12 +5823,12 @@ void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
struct page *page;
char *kaddr;
char *src = (char *)srcv;
- unsigned long i = start >> PAGE_SHIFT;
+ unsigned long i = get_eb_page_index(start);
if (check_eb_range(eb, start, len))
return;
- offset = offset_in_page(start);
+ offset = get_eb_offset_in_page(eb, start);
while (len > 0) {
page = eb->pages[i];
@@ -5771,12 +5852,12 @@ void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
size_t offset;
struct page *page;
char *kaddr;
- unsigned long i = start >> PAGE_SHIFT;
+ unsigned long i = get_eb_page_index(start);
if (check_eb_range(eb, start, len))
return;
- offset = offset_in_page(start);
+ offset = get_eb_offset_in_page(eb, start);
while (len > 0) {
page = eb->pages[i];
@@ -5800,10 +5881,20 @@ void copy_extent_buffer_full(const struct extent_buffer *dst,
ASSERT(dst->len == src->len);
- num_pages = num_extent_pages(dst);
- for (i = 0; i < num_pages; i++)
- copy_page(page_address(dst->pages[i]),
- page_address(src->pages[i]));
+ if (dst->fs_info->sectorsize == PAGE_SIZE) {
+ num_pages = num_extent_pages(dst);
+ for (i = 0; i < num_pages; i++)
+ copy_page(page_address(dst->pages[i]),
+ page_address(src->pages[i]));
+ } else {
+ size_t src_offset = get_eb_offset_in_page(src, 0);
+ size_t dst_offset = get_eb_offset_in_page(dst, 0);
+
+ ASSERT(src->fs_info->sectorsize < PAGE_SIZE);
+ memcpy(page_address(dst->pages[0]) + dst_offset,
+ page_address(src->pages[0]) + src_offset,
+ src->len);
+ }
}
void copy_extent_buffer(const struct extent_buffer *dst,
@@ -5816,7 +5907,7 @@ void copy_extent_buffer(const struct extent_buffer *dst,
size_t offset;
struct page *page;
char *kaddr;
- unsigned long i = dst_offset >> PAGE_SHIFT;
+ unsigned long i = get_eb_page_index(dst_offset);
if (check_eb_range(dst, dst_offset, len) ||
check_eb_range(src, src_offset, len))
@@ -5824,7 +5915,7 @@ void copy_extent_buffer(const struct extent_buffer *dst,
WARN_ON(src->len != dst_len);
- offset = offset_in_page(dst_offset);
+ offset = get_eb_offset_in_page(dst, dst_offset);
while (len > 0) {
page = dst->pages[i];
@@ -5868,7 +5959,7 @@ static inline void eb_bitmap_offset(const struct extent_buffer *eb,
* the bitmap item in the extent buffer + the offset of the byte in the
* bitmap item.
*/
- offset = start + byte_offset;
+ offset = start + offset_in_page(eb->start) + byte_offset;
*page_index = offset >> PAGE_SHIFT;
*page_offset = offset_in_page(offset);
@@ -6022,11 +6113,11 @@ void memcpy_extent_buffer(const struct extent_buffer *dst,
return;
while (len > 0) {
- dst_off_in_page = offset_in_page(dst_offset);
- src_off_in_page = offset_in_page(src_offset);
+ dst_off_in_page = get_eb_offset_in_page(dst, dst_offset);
+ src_off_in_page = get_eb_offset_in_page(dst, src_offset);
- dst_i = dst_offset >> PAGE_SHIFT;
- src_i = src_offset >> PAGE_SHIFT;
+ dst_i = get_eb_page_index(dst_offset);
+ src_i = get_eb_page_index(src_offset);
cur = min(len, (unsigned long)(PAGE_SIZE -
src_off_in_page));
@@ -6062,11 +6153,11 @@ void memmove_extent_buffer(const struct extent_buffer *dst,
return;
}
while (len > 0) {
- dst_i = dst_end >> PAGE_SHIFT;
- src_i = src_end >> PAGE_SHIFT;
+ dst_i = get_eb_page_index(dst_end);
+ src_i = get_eb_page_index(src_end);
- dst_off_in_page = offset_in_page(dst_end);
- src_off_in_page = offset_in_page(src_end);
+ dst_off_in_page = get_eb_offset_in_page(dst, dst_end);
+ src_off_in_page = get_eb_offset_in_page(dst, src_end);
cur = min_t(unsigned long, len, src_off_in_page + 1);
cur = min(cur, dst_off_in_page + 1);
@@ -6121,3 +6212,54 @@ int try_release_extent_buffer(struct page *page)
return release_extent_buffer(eb);
}
+
+/*
+ * btrfs_readahead_tree_block - attempt to readahead a child block
+ * @fs_info: the fs_info
+ * @bytenr: bytenr to read
+ * @owner_root: objectid of the root that owns this eb
+ * @gen: generation for the uptodate check, can be 0
+ * @level: level for the eb
+ *
+ * Attempt to readahead a tree block at @bytenr. If @gen is 0 then we do a
+ * normal uptodate check of the eb, without checking the generation. If we have
+ * to read the block we will not block on anything.
+ */
+void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
+ u64 bytenr, u64 owner_root, u64 gen, int level)
+{
+ struct extent_buffer *eb;
+ int ret;
+
+ eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
+ if (IS_ERR(eb))
+ return;
+
+ if (btrfs_buffer_uptodate(eb, gen, 1)) {
+ free_extent_buffer(eb);
+ return;
+ }
+
+ ret = read_extent_buffer_pages(eb, WAIT_NONE, 0);
+ if (ret < 0)
+ free_extent_buffer_stale(eb);
+ else
+ free_extent_buffer(eb);
+}
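/*
 * Illustrative sketch, not part of this patch: passing gen == 0 makes the
 * btrfs_buffer_uptodate() call above skip the generation comparison, so a
 * caller that does not know the expected generation can still kick
 * readahead. The helper below is a hypothetical usage example.
 */
static inline void example_readahead_any_gen(struct btrfs_fs_info *fs_info,
					     u64 bytenr, u64 owner_root,
					     int level)
{
	/* gen == 0: only a plain uptodate check, no generation check */
	btrfs_readahead_tree_block(fs_info, bytenr, owner_root, 0, level);
}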
+
+/*
+ * btrfs_readahead_node_child - readahead a node's child block
+ * @node: parent node we're reading from
+ * @slot: slot in the parent node for the child we want to read
+ *
+ * A helper for btrfs_readahead_tree_block; we simply read the bytenr pointed
+ * at by the slot in the node provided.
+ */
+void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
+{
+ btrfs_readahead_tree_block(node->fs_info,
+ btrfs_node_blockptr(node, slot),
+ btrfs_header_owner(node),
+ btrfs_node_ptr_generation(node, slot),
+ btrfs_header_level(node) - 1);
+}
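/*
 * Illustrative sketch, not part of this patch: a caller walking a node can
 * kick readahead for every child before descending. The helper name
 * example_readahead_all_children is hypothetical.
 */
static inline void example_readahead_all_children(struct extent_buffer *node)
{
	int nritems = btrfs_header_nritems(node);
	int slot;

	/* Fire-and-forget readahead for each child pointer in the node */
	for (slot = 0; slot < nritems; slot++)
		btrfs_readahead_node_child(node, slot);
}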
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index f39d02e7f7ef..19221095c635 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -6,6 +6,7 @@
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/fiemap.h>
+#include <linux/btrfs_tree.h>
#include "ulist.h"
/*
@@ -71,11 +72,10 @@ typedef blk_status_t (submit_bio_hook_t)(struct inode *inode, struct bio *bio,
int mirror_num,
unsigned long bio_flags);
-typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
- struct bio *bio, u64 bio_offset);
+typedef blk_status_t (extent_submit_bio_start_t)(struct inode *inode,
+ struct bio *bio, u64 dio_file_offset);
-#define INLINE_EXTENT_BUFFER_PAGES 16
-#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
+#define INLINE_EXTENT_BUFFER_PAGES (BTRFS_MAX_METADATA_BLOCKSIZE / PAGE_SIZE)
struct extent_buffer {
u64 start;
unsigned long len;
@@ -87,31 +87,13 @@ struct extent_buffer {
int read_mirror;
struct rcu_head rcu_head;
pid_t lock_owner;
-
- int blocking_writers;
- atomic_t blocking_readers;
- bool lock_recursed;
/* >= 0 if eb belongs to a log tree, -1 otherwise */
- short log_index;
-
- /* protects write locks */
- rwlock_t lock;
+ s8 log_index;
- /* readers use lock_wq while they wait for the write
- * lock holders to unlock
- */
- wait_queue_head_t write_lock_wq;
+ struct rw_semaphore lock;
- /* writers use read_lock_wq while they wait for readers
- * to unlock
- */
- wait_queue_head_t read_lock_wq;
struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
- int spinning_writers;
- atomic_t spinning_readers;
- atomic_t read_locks;
- int write_locks;
struct list_head leak_list;
#endif
};
@@ -199,7 +181,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
void set_page_extent_mapped(struct page *page);
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
- u64 start);
+ u64 start, u64 owner_root, int level);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
@@ -215,11 +197,20 @@ void free_extent_buffer_stale(struct extent_buffer *eb);
int read_extent_buffer_pages(struct extent_buffer *eb, int wait,
int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
+void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
+ u64 bytenr, u64 owner_root, u64 gen, int level);
+void btrfs_readahead_node_child(struct extent_buffer *node, int slot);
static inline int num_extent_pages(const struct extent_buffer *eb)
{
- return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
- (eb->start >> PAGE_SHIFT);
+ /*
+ * For sectorsize == PAGE_SIZE case, since nodesize is always aligned to
+ * sectorsize, it's just eb->len >> PAGE_SHIFT.
+ *
+ * For sectorsize < PAGE_SIZE case, we could have nodesize < PAGE_SIZE,
+ * thus have to ensure we get at least one page.
+ */
+ return (eb->len >> PAGE_SHIFT) ?: 1;
}
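/*
 * Worked example (illustrative, not part of this patch): with 64K pages and
 * a 16K nodesize, eb->len >> PAGE_SHIFT is 0, so the ?: fallback yields the
 * single page backing the whole buffer; with 4K pages and a 16K nodesize it
 * yields 16K >> 12 = 4 pages, matching the old start/len based computation.
 */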
static inline int extent_buffer_uptodate(const struct extent_buffer *eb)
@@ -270,8 +261,7 @@ void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
struct page *locked_page,
- unsigned bits_to_clear,
- unsigned long page_ops);
+ u32 bits_to_clear, unsigned long page_ops);
struct bio *btrfs_bio_alloc(u64 first_byte);
struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio);
@@ -307,7 +297,7 @@ struct io_failure_record {
blk_status_t btrfs_submit_read_repair(struct inode *inode,
- struct bio *failed_bio, u64 phy_offset,
+ struct bio *failed_bio, u32 bio_offset,
struct page *page, unsigned int pgoff,
u64 start, u64 end, int failed_mirror,
submit_bio_hook_t *submit_bio_hook);
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 8f4f2bd6d9b9..1545c22ef280 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -38,27 +38,27 @@
* Finally new_i_size should only be set in the case of truncate where we're not
* ready to use i_size_read() as the limiter yet.
*/
-void btrfs_inode_safe_disk_i_size_write(struct inode *inode, u64 new_i_size)
+void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size)
{
- struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
u64 start, end, i_size;
int ret;
- i_size = new_i_size ?: i_size_read(inode);
+ i_size = new_i_size ?: i_size_read(&inode->vfs_inode);
if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
- BTRFS_I(inode)->disk_i_size = i_size;
+ inode->disk_i_size = i_size;
return;
}
- spin_lock(&BTRFS_I(inode)->lock);
- ret = find_contiguous_extent_bit(&BTRFS_I(inode)->file_extent_tree, 0,
- &start, &end, EXTENT_DIRTY);
+ spin_lock(&inode->lock);
+ ret = find_contiguous_extent_bit(&inode->file_extent_tree, 0, &start,
+ &end, EXTENT_DIRTY);
if (!ret && start == 0)
i_size = min(i_size, end + 1);
else
i_size = 0;
- BTRFS_I(inode)->disk_i_size = i_size;
- spin_unlock(&BTRFS_I(inode)->lock);
+ inode->disk_i_size = i_size;
+ spin_unlock(&inode->lock);
}
/**
@@ -142,7 +142,6 @@ int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
file_key.offset = pos;
file_key.type = BTRFS_EXTENT_DATA_KEY;
- path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, root, path, &file_key,
sizeof(*item));
if (ret < 0)
@@ -181,7 +180,7 @@ btrfs_lookup_csum(struct btrfs_trans_handle *trans,
struct btrfs_csum_item *item;
struct extent_buffer *leaf;
u64 csum_offset = 0;
- u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
+ const u32 csum_size = fs_info->csum_size;
int csums_in_item;
file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
@@ -201,7 +200,7 @@ btrfs_lookup_csum(struct btrfs_trans_handle *trans,
goto fail;
csum_offset = (bytenr - found_key.offset) >>
- fs_info->sb->s_blocksize_bits;
+ fs_info->sectorsize_bits;
csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
csums_in_item /= csum_size;
@@ -239,12 +238,117 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
return ret;
}
+/*
+ * Find checksums for the logical bytenr range [disk_bytenr, disk_bytenr + len)
+ * and store the result to @dst.
+ *
+ * Return >0 for the number of sectors we found.
+ * Return 0 if the range [disk_bytenr, disk_bytenr + sectorsize) has no csum
+ * for it. The caller may want to try the next sector until one range is hit.
+ * Return <0 for fatal error.
+ */
+static int search_csum_tree(struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path, u64 disk_bytenr,
+ u64 len, u8 *dst)
+{
+ struct btrfs_csum_item *item = NULL;
+ struct btrfs_key key;
+ const u32 sectorsize = fs_info->sectorsize;
+ const u32 csum_size = fs_info->csum_size;
+ u32 itemsize;
+ int ret;
+ u64 csum_start;
+ u64 csum_len;
+
+ ASSERT(IS_ALIGNED(disk_bytenr, sectorsize) &&
+ IS_ALIGNED(len, sectorsize));
+
+ /* Check if the current csum item covers disk_bytenr */
+ if (path->nodes[0]) {
+ item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_csum_item);
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ itemsize = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
+
+ csum_start = key.offset;
+ csum_len = (itemsize / csum_size) * sectorsize;
+
+ if (in_range(disk_bytenr, csum_start, csum_len))
+ goto found;
+ }
+
+ /* Current item doesn't contain the desired range, search again */
+ btrfs_release_path(path);
+ item = btrfs_lookup_csum(NULL, fs_info->csum_root, path, disk_bytenr, 0);
+ if (IS_ERR(item)) {
+ ret = PTR_ERR(item);
+ goto out;
+ }
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ itemsize = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
+
+ csum_start = key.offset;
+ csum_len = (itemsize / csum_size) * sectorsize;
+ ASSERT(in_range(disk_bytenr, csum_start, csum_len));
+
+found:
+ ret = (min(csum_start + csum_len, disk_bytenr + len) -
+ disk_bytenr) >> fs_info->sectorsize_bits;
+ read_extent_buffer(path->nodes[0], dst, (unsigned long)item,
+ ret * csum_size);
+out:
+ if (ret == -ENOENT)
+ ret = 0;
+ return ret;
+}
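/*
 * Worked example (illustrative, not part of this patch): with 4K sectors and
 * 4-byte crc32c csums, an item of itemsize 64 covers
 * csum_len = (64 / 4) * 4K = 64K of data. If disk_bytenr sits 16K into that
 * range and len is 1M, the function returns
 * (min(csum_start + 64K, disk_bytenr + 1M) - disk_bytenr) >> 12 = 12 sectors
 * and copies 12 * 4 bytes of csums into @dst.
 */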
+
+/*
+ * Locate the file_offset of @disk_bytenr inside @bio.
+ *
+ * A btrfs bio represents the read range
+ * [bi_sector << 9, bi_sector << 9 + bi_size).
+ * Knowing this, we can iterate through each bvec to locate the page belonging
+ * to @disk_bytenr and get the file offset.
+ *
+ * @inode is used to determine if the bvec page really belongs to @inode.
+ *
+ * Return 0 if we can't find the file offset.
+ * Return >0 if we find the file offset and store it in @file_offset_ret.
+ */
+static int search_file_offset_in_bio(struct bio *bio, struct inode *inode,
+ u64 disk_bytenr, u64 *file_offset_ret)
+{
+ struct bvec_iter iter;
+ struct bio_vec bvec;
+ u64 cur = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ int ret = 0;
+
+ bio_for_each_segment(bvec, bio, iter) {
+ struct page *page = bvec.bv_page;
+
+ if (cur > disk_bytenr)
+ break;
+ if (cur + bvec.bv_len <= disk_bytenr) {
+ cur += bvec.bv_len;
+ continue;
+ }
+ ASSERT(in_range(disk_bytenr, cur, bvec.bv_len));
+ if (page->mapping && page->mapping->host &&
+ page->mapping->host == inode) {
+ ret = 1;
+ *file_offset_ret = page_offset(page) + bvec.bv_offset +
+ disk_bytenr - cur;
+ break;
+ }
+ }
+ return ret;
+}
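/*
 * Worked example (illustrative, not part of this patch): given a bio starting
 * at disk bytenr 1M with two 4K bvecs, a lookup of disk_bytenr 1M + 4K lands
 * in the second bvec, and the returned file offset is that page's offset plus
 * bv_offset plus (disk_bytenr - cur), i.e. the byte inside the page that maps
 * the requested sector.
 */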
+
/**
- * btrfs_lookup_bio_sums - Look up checksums for a bio.
+ * Lookup the checksum for the read bio in csum tree.
+ *
* @inode: inode that the bio is for.
* @bio: bio to look up.
- * @offset: Unless (u64)-1, look up checksums for this offset in the file.
- * If (u64)-1, use the page offsets from the bio instead.
* @dst: Buffer of size nblocks * btrfs_super_csum_size() used to return
* checksum (nblocks = bio->bi_iter.bi_size / fs_info->sectorsize). If
* NULL, the checksum buffer is allocated and returned in
@@ -252,31 +356,40 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
*
* Return: BLK_STS_RESOURCE if allocating memory fails, BLK_STS_OK otherwise.
*/
-blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
- u64 offset, u8 *dst)
+blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct bio_vec bvec;
- struct bvec_iter iter;
- struct btrfs_csum_item *item = NULL;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_path *path;
- const bool page_offsets = (offset == (u64)-1);
+ const u32 sectorsize = fs_info->sectorsize;
+ const u32 csum_size = fs_info->csum_size;
+ u32 orig_len = bio->bi_iter.bi_size;
+ u64 orig_disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ u64 cur_disk_bytenr;
u8 *csum;
- u64 item_start_offset = 0;
- u64 item_last_offset = 0;
- u64 disk_bytenr;
- u64 page_bytes_left;
- u32 diff;
- int nblocks;
+ const unsigned int nblocks = orig_len >> fs_info->sectorsize_bits;
int count = 0;
- u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
+ if (!fs_info->csum_root || (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
+ return BLK_STS_OK;
+
+ /*
+ * This function is only called for a read bio.
+ *
+ * This means two things:
+ * - All our csums should only be in the csum tree
+ * No ordered extent csums, as ordered extents are only for the
+ * write path.
+ * - No need to bother with any other info from the bvec
+ * Since we're looking up csums, the only important info is the
+ * disk_bytenr and the length, which can be extracted from bi_iter
+ * directly.
+ */
+ ASSERT(bio_op(bio) == REQ_OP_READ);
path = btrfs_alloc_path();
if (!path)
return BLK_STS_RESOURCE;
- nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
if (!dst) {
struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
@@ -295,7 +408,11 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
csum = dst;
}
- if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
+ /*
+ * If the requested number of sectors is larger than one leaf can contain,
+ * kick readahead for the csum tree.
+ */
+ if (nblocks > fs_info->csums_per_leaf)
path->reada = READA_FORWARD;
/*
@@ -309,85 +426,62 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
path->skip_locking = 1;
}
- disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
+ for (cur_disk_bytenr = orig_disk_bytenr;
+ cur_disk_bytenr < orig_disk_bytenr + orig_len;
+ cur_disk_bytenr += (count * sectorsize)) {
+ u64 search_len = orig_disk_bytenr + orig_len - cur_disk_bytenr;
+ unsigned int sector_offset;
+ u8 *csum_dst;
- bio_for_each_segment(bvec, bio, iter) {
- page_bytes_left = bvec.bv_len;
- if (count)
- goto next;
-
- if (page_offsets)
- offset = page_offset(bvec.bv_page) + bvec.bv_offset;
- count = btrfs_find_ordered_sum(BTRFS_I(inode), offset,
- disk_bytenr, csum, nblocks);
- if (count)
- goto found;
+ /*
+ * Although both cur_disk_bytenr and orig_disk_bytenr are u64,
+ * we're calculating the offset to the bio start.
+ *
+ * Bio size is limited to UINT_MAX, thus unsigned int is large
+ * enough to contain the raw result, not to mention the right
+ * shifted result.
+ */
+ ASSERT(cur_disk_bytenr - orig_disk_bytenr < UINT_MAX);
+ sector_offset = (cur_disk_bytenr - orig_disk_bytenr) >>
+ fs_info->sectorsize_bits;
+ csum_dst = csum + sector_offset * csum_size;
+
+ count = search_csum_tree(fs_info, path, cur_disk_bytenr,
+ search_len, csum_dst);
+ if (count <= 0) {
+ /*
+ * Either we hit a critical error or we didn't find
+ * the csum.
+ * Either way, we zero out the csum dst buffer and
+ * skip to the next sector.
+ */
+ memset(csum_dst, 0, csum_size);
+ count = 1;
- if (!item || disk_bytenr < item_start_offset ||
- disk_bytenr >= item_last_offset) {
- struct btrfs_key found_key;
- u32 item_size;
-
- if (item)
- btrfs_release_path(path);
- item = btrfs_lookup_csum(NULL, fs_info->csum_root,
- path, disk_bytenr, 0);
- if (IS_ERR(item)) {
- count = 1;
- memset(csum, 0, csum_size);
- if (BTRFS_I(inode)->root->root_key.objectid ==
- BTRFS_DATA_RELOC_TREE_OBJECTID) {
- set_extent_bits(io_tree, offset,
- offset + fs_info->sectorsize - 1,
+ /*
+ * For the data reloc inode, we need to mark the
+ * range NODATASUM so that balance won't report a
+ * false csum error.
+ */
+ if (BTRFS_I(inode)->root->root_key.objectid ==
+ BTRFS_DATA_RELOC_TREE_OBJECTID) {
+ u64 file_offset;
+ int ret;
+
+ ret = search_file_offset_in_bio(bio, inode,
+ cur_disk_bytenr, &file_offset);
+ if (ret)
+ set_extent_bits(io_tree, file_offset,
+ file_offset + sectorsize - 1,
EXTENT_NODATASUM);
- } else {
- btrfs_info_rl(fs_info,
- "no csum found for inode %llu start %llu",
- btrfs_ino(BTRFS_I(inode)), offset);
- }
- item = NULL;
- btrfs_release_path(path);
- goto found;
+ } else {
+ btrfs_warn_rl(fs_info,
+ "csum hole found for disk bytenr range [%llu, %llu)",
+ cur_disk_bytenr, cur_disk_bytenr + sectorsize);
}
- btrfs_item_key_to_cpu(path->nodes[0], &found_key,
- path->slots[0]);
-
- item_start_offset = found_key.offset;
- item_size = btrfs_item_size_nr(path->nodes[0],
- path->slots[0]);
- item_last_offset = item_start_offset +
- (item_size / csum_size) *
- fs_info->sectorsize;
- item = btrfs_item_ptr(path->nodes[0], path->slots[0],
- struct btrfs_csum_item);
- }
- /*
- * this byte range must be able to fit inside
- * a single leaf so it will also fit inside a u32
- */
- diff = disk_bytenr - item_start_offset;
- diff = diff / fs_info->sectorsize;
- diff = diff * csum_size;
- count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
- inode->i_sb->s_blocksize_bits);
- read_extent_buffer(path->nodes[0], csum,
- ((unsigned long)item) + diff,
- csum_size * count);
-found:
- csum += count * csum_size;
- nblocks -= count;
-next:
- while (count > 0) {
- count--;
- disk_bytenr += fs_info->sectorsize;
- offset += fs_info->sectorsize;
- page_bytes_left -= fs_info->sectorsize;
- if (!page_bytes_left)
- break; /* move to next bio */
}
}
- WARN_ON_ONCE(count);
btrfs_free_path(path);
return BLK_STS_OK;
}
@@ -406,7 +500,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
int ret;
size_t size;
u64 csum_end;
- u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
+ const u32 csum_size = fs_info->csum_size;
ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
IS_ALIGNED(end + 1, fs_info->sectorsize));
@@ -433,8 +527,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
key.type == BTRFS_EXTENT_CSUM_KEY) {
- offset = (start - key.offset) >>
- fs_info->sb->s_blocksize_bits;
+ offset = (start - key.offset) >> fs_info->sectorsize_bits;
if (offset * csum_size <
btrfs_item_size_nr(leaf, path->slots[0] - 1))
path->slots[0]--;
@@ -484,10 +577,9 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
sums->bytenr = start;
sums->len = (int)size;
- offset = (start - key.offset) >>
- fs_info->sb->s_blocksize_bits;
+ offset = (start - key.offset) >> fs_info->sectorsize_bits;
offset *= csum_size;
- size >>= fs_info->sb->s_blocksize_bits;
+ size >>= fs_info->sectorsize_bits;
read_extent_buffer(path->nodes[0],
sums->sums,
@@ -539,7 +631,6 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
int i;
u64 offset;
unsigned nofs_flag;
- const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
nofs_flag = memalloc_nofs_save();
sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
@@ -557,7 +648,7 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
else
offset = 0; /* shut up gcc */
- sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
+ sums->bytenr = bio->bi_iter.bi_sector << 9;
index = 0;
shash->tfm = fs_info->csum_shash;
@@ -596,7 +687,7 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
ordered = btrfs_lookup_ordered_extent(inode,
offset);
ASSERT(ordered); /* Logic error */
- sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
+ sums->bytenr = (bio->bi_iter.bi_sector << 9)
+ total_bytes;
index = 0;
}
@@ -607,7 +698,7 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
fs_info->sectorsize,
sums->sums + index);
kunmap_atomic(data);
- index += csum_size;
+ index += fs_info->csum_size;
offset += fs_info->sectorsize;
this_sum_bytes += fs_info->sectorsize;
total_bytes += fs_info->sectorsize;
@@ -637,14 +728,14 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 len)
{
struct extent_buffer *leaf;
- u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
+ const u32 csum_size = fs_info->csum_size;
u64 csum_end;
u64 end_byte = bytenr + len;
- u32 blocksize_bits = fs_info->sb->s_blocksize_bits;
+ u32 blocksize_bits = fs_info->sectorsize_bits;
leaf = path->nodes[0];
csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
- csum_end <<= fs_info->sb->s_blocksize_bits;
+ csum_end <<= blocksize_bits;
csum_end += key->offset;
if (key->offset < bytenr && csum_end <= end_byte) {
@@ -691,8 +782,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
u64 csum_end;
struct extent_buffer *leaf;
int ret;
- u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
- int blocksize_bits = fs_info->sb->s_blocksize_bits;
+ const u32 csum_size = fs_info->csum_size;
+ u32 blocksize_bits = fs_info->sectorsize_bits;
ASSERT(root == fs_info->csum_root ||
root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
@@ -706,7 +797,6 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
key.offset = end_byte - 1;
key.type = BTRFS_EXTENT_CSUM_KEY;
- path->leave_spinning = 1;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0) {
if (path->slots[0] == 0)
@@ -846,7 +936,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
int index = 0;
int found_next;
int ret;
- u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
+ const u32 csum_size = fs_info->csum_size;
path = btrfs_alloc_path();
if (!path)
@@ -921,7 +1011,7 @@ again:
if (btrfs_leaf_free_space(leaf) >= csum_size) {
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
csum_offset = (bytenr - found_key.offset) >>
- fs_info->sb->s_blocksize_bits;
+ fs_info->sectorsize_bits;
goto extend_csum;
}
@@ -939,8 +1029,7 @@ again:
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- csum_offset = (bytenr - found_key.offset) >>
- fs_info->sb->s_blocksize_bits;
+ csum_offset = (bytenr - found_key.offset) >> fs_info->sectorsize_bits;
if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
@@ -956,7 +1045,7 @@ extend_csum:
u32 diff;
tmp = sums->len - total_bytes;
- tmp >>= fs_info->sb->s_blocksize_bits;
+ tmp >>= fs_info->sectorsize_bits;
WARN_ON(tmp < 1);
extend_nr = max_t(int, 1, (int)tmp);
@@ -981,9 +1070,9 @@ insert:
u64 tmp;
tmp = sums->len - total_bytes;
- tmp >>= fs_info->sb->s_blocksize_bits;
+ tmp >>= fs_info->sectorsize_bits;
tmp = min(tmp, (next_offset - file_key.offset) >>
- fs_info->sb->s_blocksize_bits);
+ fs_info->sectorsize_bits);
tmp = max_t(u64, 1, tmp);
tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
@@ -991,10 +1080,8 @@ insert:
} else {
ins_size = csum_size;
}
- path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, root, path, &file_key,
ins_size);
- path->leave_spinning = 0;
if (ret < 0)
goto out;
if (WARN_ON(ret != 0))
@@ -1007,8 +1094,7 @@ csum:
item = (struct btrfs_csum_item *)((unsigned char *)item +
csum_offset * csum_size);
found:
- ins_size = (u32)(sums->len - total_bytes) >>
- fs_info->sb->s_blocksize_bits;
+ ins_size = (u32)(sums->len - total_bytes) >> fs_info->sectorsize_bits;
ins_size *= csum_size;
ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
ins_size);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 4373da7bcc0d..0e41459b8de6 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -462,7 +462,7 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
*/
int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
size_t num_pages, loff_t pos, size_t write_bytes,
- struct extent_state **cached)
+ struct extent_state **cached, bool noreserve)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
int err = 0;
@@ -474,7 +474,13 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
loff_t isize = i_size_read(&inode->vfs_inode);
unsigned int extra_bits = 0;
- start_pos = pos & ~((u64) fs_info->sectorsize - 1);
+ if (write_bytes == 0)
+ return 0;
+
+ if (noreserve)
+ extra_bits |= EXTENT_NORESERVE;
+
+ start_pos = round_down(pos, fs_info->sectorsize);
num_bytes = round_up(write_bytes + pos - start_pos,
fs_info->sectorsize);
@@ -671,14 +677,16 @@ next:
* If an extent intersects the range but is not entirely inside the range
* it is either truncated or split. Anything entirely inside the range
* is deleted from the tree.
+ *
+ * Note: the VFS inode's number of bytes is not updated; it's up to the caller
+ * to deal with that. We set the field 'bytes_found' of the arguments structure
+ * with the number of allocated bytes found in the target range, so that the
+ * caller can update the inode's number of bytes in an atomic way when
+ * replacing extents in a range to avoid races with stat(2).
*/
-int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_inode *inode,
- struct btrfs_path *path, u64 start, u64 end,
- u64 *drop_end, int drop_cache,
- int replace_extent,
- u32 extent_item_size,
- int *key_inserted)
+int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_inode *inode,
+ struct btrfs_drop_extents_args *args)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf;
@@ -686,14 +694,13 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
struct btrfs_ref ref = { 0 };
struct btrfs_key key;
struct btrfs_key new_key;
- struct inode *vfs_inode = &inode->vfs_inode;
u64 ino = btrfs_ino(inode);
- u64 search_start = start;
+ u64 search_start = args->start;
u64 disk_bytenr = 0;
u64 num_bytes = 0;
u64 extent_offset = 0;
u64 extent_end = 0;
- u64 last_end = start;
+ u64 last_end = args->start;
int del_nr = 0;
int del_slot = 0;
int extent_type;
@@ -703,11 +710,26 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
int update_refs;
int found = 0;
int leafs_visited = 0;
+ struct btrfs_path *path = args->path;
+
+ args->bytes_found = 0;
+ args->extent_inserted = false;
- if (drop_cache)
- btrfs_drop_extent_cache(inode, start, end - 1, 0);
+ /* Must always have a path if ->replace_extent is true */
+ ASSERT(!(args->replace_extent && !args->path));
- if (start >= inode->disk_i_size && !replace_extent)
+ if (!path) {
+ path = btrfs_alloc_path();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (args->drop_cache)
+ btrfs_drop_extent_cache(inode, args->start, args->end - 1, 0);
+
+ if (args->start >= inode->disk_i_size && !args->replace_extent)
modify_tree = 0;
update_refs = (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
@@ -718,7 +740,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
search_start, modify_tree);
if (ret < 0)
break;
- if (ret > 0 && path->slots[0] > 0 && search_start == start) {
+ if (ret > 0 && path->slots[0] > 0 && search_start == args->start) {
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
if (key.objectid == ino &&
@@ -753,7 +775,7 @@ next_slot:
path->slots[0]++;
goto next_slot;
}
- if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
+ if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= args->end)
break;
fi = btrfs_item_ptr(leaf, path->slots[0],
@@ -795,7 +817,7 @@ next_slot:
}
found = 1;
- search_start = max(key.offset, start);
+ search_start = max(key.offset, args->start);
if (recow || !modify_tree) {
modify_tree = -1;
btrfs_release_path(path);
@@ -806,7 +828,7 @@ next_slot:
* | - range to drop - |
* | -------- extent -------- |
*/
- if (start > key.offset && end < extent_end) {
+ if (args->start > key.offset && args->end < extent_end) {
BUG_ON(del_nr > 0);
if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
ret = -EOPNOTSUPP;
@@ -814,7 +836,7 @@ next_slot:
}
memcpy(&new_key, &key, sizeof(new_key));
- new_key.offset = start;
+ new_key.offset = args->start;
ret = btrfs_duplicate_item(trans, root, path,
&new_key);
if (ret == -EAGAIN) {
@@ -828,15 +850,15 @@ next_slot:
fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
struct btrfs_file_extent_item);
btrfs_set_file_extent_num_bytes(leaf, fi,
- start - key.offset);
+ args->start - key.offset);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
- extent_offset += start - key.offset;
+ extent_offset += args->start - key.offset;
btrfs_set_file_extent_offset(leaf, fi, extent_offset);
btrfs_set_file_extent_num_bytes(leaf, fi,
- extent_end - start);
+ extent_end - args->start);
btrfs_mark_buffer_dirty(leaf);
if (update_refs && disk_bytenr > 0) {
@@ -846,11 +868,11 @@ next_slot:
btrfs_init_data_ref(&ref,
root->root_key.objectid,
new_key.objectid,
- start - extent_offset);
+ args->start - extent_offset);
ret = btrfs_inc_extent_ref(trans, &ref);
BUG_ON(ret); /* -ENOMEM */
}
- key.offset = start;
+ key.offset = args->start;
}
/*
* From here on out we will have actually dropped something, so
@@ -862,23 +884,23 @@ next_slot:
* | ---- range to drop ----- |
* | -------- extent -------- |
*/
- if (start <= key.offset && end < extent_end) {
+ if (args->start <= key.offset && args->end < extent_end) {
if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
ret = -EOPNOTSUPP;
break;
}
memcpy(&new_key, &key, sizeof(new_key));
- new_key.offset = end;
+ new_key.offset = args->end;
btrfs_set_item_key_safe(fs_info, path, &new_key);
- extent_offset += end - key.offset;
+ extent_offset += args->end - key.offset;
btrfs_set_file_extent_offset(leaf, fi, extent_offset);
btrfs_set_file_extent_num_bytes(leaf, fi,
- extent_end - end);
+ extent_end - args->end);
btrfs_mark_buffer_dirty(leaf);
if (update_refs && disk_bytenr > 0)
- inode_sub_bytes(vfs_inode, end - key.offset);
+ args->bytes_found += args->end - key.offset;
break;
}
@@ -887,7 +909,7 @@ next_slot:
* | ---- range to drop ----- |
* | -------- extent -------- |
*/
- if (start > key.offset && end >= extent_end) {
+ if (args->start > key.offset && args->end >= extent_end) {
BUG_ON(del_nr > 0);
if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
ret = -EOPNOTSUPP;
@@ -895,11 +917,11 @@ next_slot:
}
btrfs_set_file_extent_num_bytes(leaf, fi,
- start - key.offset);
+ args->start - key.offset);
btrfs_mark_buffer_dirty(leaf);
if (update_refs && disk_bytenr > 0)
- inode_sub_bytes(vfs_inode, extent_end - start);
- if (end == extent_end)
+ args->bytes_found += extent_end - args->start;
+ if (args->end == extent_end)
break;
path->slots[0]++;
@@ -910,7 +932,7 @@ next_slot:
* | ---- range to drop ----- |
* | ------ extent ------ |
*/
- if (start <= key.offset && end >= extent_end) {
+ if (args->start <= key.offset && args->end >= extent_end) {
delete_extent_item:
if (del_nr == 0) {
del_slot = path->slots[0];
@@ -922,8 +944,7 @@ delete_extent_item:
if (update_refs &&
extent_type == BTRFS_FILE_EXTENT_INLINE) {
- inode_sub_bytes(vfs_inode,
- extent_end - key.offset);
+ args->bytes_found += extent_end - key.offset;
extent_end = ALIGN(extent_end,
fs_info->sectorsize);
} else if (update_refs && disk_bytenr > 0) {
@@ -936,11 +957,10 @@ delete_extent_item:
key.offset - extent_offset);
ret = btrfs_free_extent(trans, &ref);
BUG_ON(ret); /* -ENOMEM */
- inode_sub_bytes(vfs_inode,
- extent_end - key.offset);
+ args->bytes_found += extent_end - key.offset;
}
- if (end == extent_end)
+ if (args->end == extent_end)
break;
if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
@@ -970,7 +990,7 @@ delete_extent_item:
* Set path->slots[0] to first slot, so that after the delete
* if items are move off from our leaf to its immediate left or
* right neighbor leafs, we end up with a correct and adjusted
- * path->slots[0] for our insertion (if replace_extent != 0).
+ * path->slots[0] for our insertion (if args->replace_extent).
*/
path->slots[0] = del_slot;
ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
@@ -984,15 +1004,14 @@ delete_extent_item:
* which case it unlocked our path, so check path->locks[0] matches a
* write lock.
*/
- if (!ret && replace_extent && leafs_visited == 1 &&
- (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
- path->locks[0] == BTRFS_WRITE_LOCK) &&
+ if (!ret && args->replace_extent && leafs_visited == 1 &&
+ path->locks[0] == BTRFS_WRITE_LOCK &&
btrfs_leaf_free_space(leaf) >=
- sizeof(struct btrfs_item) + extent_item_size) {
+ sizeof(struct btrfs_item) + args->extent_item_size) {
key.objectid = ino;
key.type = BTRFS_EXTENT_DATA_KEY;
- key.offset = start;
+ key.offset = args->start;
if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
struct btrfs_key slot_key;
@@ -1000,30 +1019,18 @@ delete_extent_item:
if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
path->slots[0]++;
}
- setup_items_for_insert(root, path, &key, &extent_item_size, 1);
- *key_inserted = 1;
+ setup_items_for_insert(root, path, &key,
+ &args->extent_item_size, 1);
+ args->extent_inserted = true;
}
- if (!replace_extent || !(*key_inserted))
+ if (!args->path)
+ btrfs_free_path(path);
+ else if (!args->extent_inserted)
btrfs_release_path(path);
- if (drop_end)
- *drop_end = found ? min(end, last_end) : end;
- return ret;
-}
-
-int btrfs_drop_extents(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode, u64 start,
- u64 end, int drop_cache)
-{
- struct btrfs_path *path;
- int ret;
+out:
+ args->drop_end = found ? min(args->end, last_end) : args->end;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
- ret = __btrfs_drop_extents(trans, root, BTRFS_I(inode), path, start,
- end, NULL, drop_cache, 0, 0, NULL);
- btrfs_free_path(path);
return ret;
}
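/*
 * Illustrative sketch, not part of this patch: a minimal caller of the new
 * args based interface, mirroring how btrfs_replace_file_extents() uses
 * bytes_found to update the inode byte count atomically. The helper name
 * example_drop_range is hypothetical.
 */
static int example_drop_range(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;

	drop_args.start = start;
	drop_args.end = end;
	drop_args.drop_cache = true;
	/* No path supplied: btrfs_drop_extents() allocates and frees its own */
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret == 0)
		btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
	return ret;
}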
@@ -1558,11 +1565,84 @@ void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
btrfs_drew_write_unlock(&inode->root->snapshot_lock);
}
+static void update_time_for_write(struct inode *inode)
+{
+ struct timespec64 now;
+
+ if (IS_NOCMTIME(inode))
+ return;
+
+ now = current_time(inode);
+ if (!timespec64_equal(&inode->i_mtime, &now))
+ inode->i_mtime = now;
+
+ if (!timespec64_equal(&inode->i_ctime, &now))
+ inode->i_ctime = now;
+
+ if (IS_I_VERSION(inode))
+ inode_inc_iversion(inode);
+}
+
+static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
+ size_t count)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ loff_t pos = iocb->ki_pos;
+ int ret;
+ loff_t oldsize;
+ loff_t start_pos;
+
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ size_t nocow_bytes = count;
+
+ /* We will allocate space in case nodatacow is not set, so bail */
+ if (check_nocow_nolock(BTRFS_I(inode), pos, &nocow_bytes) <= 0)
+ return -EAGAIN;
+ /*
+ * There are holes in the range or parts of the range that must
+ * be COWed (shared extents, RO block groups, etc), so just bail
+ * out.
+ */
+ if (nocow_bytes < count)
+ return -EAGAIN;
+ }
+
+ current->backing_dev_info = inode_to_bdi(inode);
+ ret = file_remove_privs(file);
+ if (ret)
+ return ret;
+
+ /*
+ * We reserve space for updating the inode when we reserve space for the
+ * extent we are going to write, so we will enospc out there. We don't
+ * need to start yet another transaction to update the inode as we will
+ * update the inode when we finish writing whatever data we write.
+ */
+ update_time_for_write(inode);
+
+ start_pos = round_down(pos, fs_info->sectorsize);
+ oldsize = i_size_read(inode);
+ if (start_pos > oldsize) {
+ /* Expand hole size to cover write data, preventing empty gap */
+ loff_t end_pos = round_up(pos + count, fs_info->sectorsize);
+
+ ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos);
+ if (ret) {
+ current->backing_dev_info = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
struct iov_iter *i)
{
struct file *file = iocb->ki_filp;
- loff_t pos = iocb->ki_pos;
+ loff_t pos;
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct page **pages = NULL;
@@ -1572,17 +1652,37 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
u64 lockend;
size_t num_written = 0;
int nrptrs;
- int ret = 0;
+ ssize_t ret;
bool only_release_metadata = false;
bool force_page_uptodate = false;
+ loff_t old_isize = i_size_read(inode);
+ unsigned int ilock_flags = 0;
+
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ ilock_flags |= BTRFS_ILOCK_TRY;
+
+ ret = btrfs_inode_lock(inode, ilock_flags);
+ if (ret < 0)
+ return ret;
+
+ ret = generic_write_checks(iocb, i);
+ if (ret <= 0)
+ goto out;
+ ret = btrfs_write_check(iocb, i, ret);
+ if (ret < 0)
+ goto out;
+
+ pos = iocb->ki_pos;
nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
PAGE_SIZE / (sizeof(struct page *)));
nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
nrptrs = max(nrptrs, 8);
pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
+ if (!pages) {
+ ret = -ENOMEM;
+ goto out;
+ }
while (iov_iter_count(i) > 0) {
struct extent_state *cached_state = NULL;
@@ -1591,8 +1691,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
size_t write_bytes = min(iov_iter_count(i),
nrptrs * (size_t)PAGE_SIZE -
offset);
- size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
- PAGE_SIZE);
+ size_t num_pages;
size_t reserve_bytes;
size_t dirty_pages;
size_t copied;
@@ -1600,8 +1699,6 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
size_t num_sectors;
int extents_locked;
- WARN_ON(num_pages > nrptrs);
-
/*
* Fault pages before locking them in prepare_pages
* to avoid recursive lock
@@ -1613,35 +1710,28 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
only_release_metadata = false;
sector_offset = pos & (fs_info->sectorsize - 1);
- reserve_bytes = round_up(write_bytes + sector_offset,
- fs_info->sectorsize);
extent_changeset_release(data_reserved);
ret = btrfs_check_data_free_space(BTRFS_I(inode),
&data_reserved, pos,
write_bytes);
if (ret < 0) {
+ /*
+ * If we don't have to COW at the offset, reserve
+ * metadata only. write_bytes may get smaller than
+ * requested here.
+ */
if (btrfs_check_nocow_lock(BTRFS_I(inode), pos,
- &write_bytes) > 0) {
- /*
- * For nodata cow case, no need to reserve
- * data space.
- */
+ &write_bytes) > 0)
only_release_metadata = true;
- /*
- * our prealloc extent may be smaller than
- * write_bytes, so scale down.
- */
- num_pages = DIV_ROUND_UP(write_bytes + offset,
- PAGE_SIZE);
- reserve_bytes = round_up(write_bytes +
- sector_offset,
- fs_info->sectorsize);
- } else {
+ else
break;
- }
}
+ num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
+ WARN_ON(num_pages > nrptrs);
+ reserve_bytes = round_up(write_bytes + sector_offset,
+ fs_info->sectorsize);
WARN_ON(reserve_bytes == 0);
ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
reserve_bytes);
@@ -1710,8 +1800,7 @@ again:
if (num_sectors > dirty_sectors) {
/* release everything except the sectors we dirtied */
- release_bytes -= dirty_sectors <<
- fs_info->sb->s_blocksize_bits;
+ release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
if (only_release_metadata) {
btrfs_delalloc_release_metadata(BTRFS_I(inode),
release_bytes, true);
@@ -1730,10 +1819,9 @@ again:
release_bytes = round_up(copied + sector_offset,
fs_info->sectorsize);
- if (copied > 0)
- ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
- dirty_pages, pos, copied,
- &cached_state);
+ ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
+ dirty_pages, pos, copied,
+ &cached_state, only_release_metadata);
/*
* If we have not locked the extent range, because the range's
@@ -1758,17 +1846,6 @@ again:
if (only_release_metadata)
btrfs_check_nocow_unlock(BTRFS_I(inode));
- if (only_release_metadata && copied > 0) {
- lockstart = round_down(pos,
- fs_info->sectorsize);
- lockend = round_up(pos + copied,
- fs_info->sectorsize) - 1;
-
- set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, EXTENT_NORESERVE, NULL,
- NULL, GFP_NOFS);
- }
-
btrfs_drop_pages(pages, num_pages);
cond_resched();
@@ -1795,24 +1872,102 @@ again:
}
extent_changeset_free(data_reserved);
+ if (num_written > 0) {
+ pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
+ iocb->ki_pos += num_written;
+ }
+out:
+ btrfs_inode_unlock(inode, ilock_flags);
return num_written ? num_written : ret;
}
-static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
+ const struct iov_iter *iter, loff_t offset)
+{
+ const u32 blocksize_mask = fs_info->sectorsize - 1;
+
+ if (offset & blocksize_mask)
+ return -EINVAL;
+
+ if (iov_iter_alignment(iter) & blocksize_mask)
+ return -EINVAL;
+
+ return 0;
+}
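/*
 * Worked example (illustrative, not part of this patch): with a 4K
 * sectorsize the mask is 0xfff, so an iter positioned at offset 6144 (0x1800)
 * fails the offset check, and an iter whose segments start or end at
 * non-4K-aligned addresses fails the iov_iter_alignment() check; either case
 * makes btrfs_direct_write() fall back to the buffered path.
 */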
+
+static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
loff_t pos;
- ssize_t written;
+ ssize_t written = 0;
ssize_t written_buffered;
loff_t endbyte;
- int err;
+ ssize_t err;
+ unsigned int ilock_flags = 0;
+ struct iomap_dio *dio = NULL;
+
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ ilock_flags |= BTRFS_ILOCK_TRY;
+
+ /* If the write DIO is within EOF, use a shared lock */
+ if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode))
+ ilock_flags |= BTRFS_ILOCK_SHARED;
+
+relock:
+ err = btrfs_inode_lock(inode, ilock_flags);
+ if (err < 0)
+ return err;
+
+ err = generic_write_checks(iocb, from);
+ if (err <= 0) {
+ btrfs_inode_unlock(inode, ilock_flags);
+ return err;
+ }
+
+ err = btrfs_write_check(iocb, from, err);
+ if (err < 0) {
+ btrfs_inode_unlock(inode, ilock_flags);
+ goto out;
+ }
+
+ pos = iocb->ki_pos;
+ /*
+ * Re-check since the file size may have changed just before taking the
+ * lock, or pos may have changed because of O_APPEND in
+ * generic_write_checks().
+ */
+ if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
+ pos + iov_iter_count(from) > i_size_read(inode)) {
+ btrfs_inode_unlock(inode, ilock_flags);
+ ilock_flags &= ~BTRFS_ILOCK_SHARED;
+ goto relock;
+ }
- written = btrfs_direct_IO(iocb, from);
+ if (check_direct_IO(fs_info, from, pos)) {
+ btrfs_inode_unlock(inode, ilock_flags);
+ goto buffered;
+ }
- if (written < 0 || !iov_iter_count(from))
- return written;
+ dio = __iomap_dio_rw(iocb, from, &btrfs_dio_iomap_ops,
+ &btrfs_dio_ops, is_sync_kiocb(iocb));
+ btrfs_inode_unlock(inode, ilock_flags);
+
+ if (IS_ERR_OR_NULL(dio)) {
+ err = PTR_ERR_OR_ZERO(dio);
+ if (err < 0 && err != -ENOTBLK)
+ goto out;
+ } else {
+ written = iomap_dio_complete(dio);
+ }
+
+ if (written < 0 || !iov_iter_count(from)) {
+ err = written;
+ goto out;
+ }
+
+buffered:
pos = iocb->ki_pos;
written_buffered = btrfs_buffered_write(iocb, from);
if (written_buffered < 0) {
@@ -1838,24 +1993,6 @@ out:
return written ? written : err;
}
-static void update_time_for_write(struct inode *inode)
-{
- struct timespec64 now;
-
- if (IS_NOCMTIME(inode))
- return;
-
- now = current_time(inode);
- if (!timespec64_equal(&inode->i_mtime, &now))
- inode->i_mtime = now;
-
- if (!timespec64_equal(&inode->i_ctime, &now))
- inode->i_ctime = now;
-
- if (IS_I_VERSION(inode))
- inode_inc_iversion(inode);
-}
-
static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
struct iov_iter *from)
{
@@ -1863,148 +2000,28 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
- u64 start_pos;
- u64 end_pos;
ssize_t num_written = 0;
const bool sync = iocb->ki_flags & IOCB_DSYNC;
- ssize_t err;
- loff_t pos;
- size_t count;
- loff_t oldsize;
- int clean_page = 0;
-
- if (!(iocb->ki_flags & IOCB_DIRECT) &&
- (iocb->ki_flags & IOCB_NOWAIT))
- return -EOPNOTSUPP;
-
- if (iocb->ki_flags & IOCB_NOWAIT) {
- if (!inode_trylock(inode))
- return -EAGAIN;
- } else {
- inode_lock(inode);
- }
-
- err = generic_write_checks(iocb, from);
- if (err <= 0) {
- inode_unlock(inode);
- return err;
- }
-
- pos = iocb->ki_pos;
- count = iov_iter_count(from);
- if (iocb->ki_flags & IOCB_NOWAIT) {
- size_t nocow_bytes = count;
-
- /*
- * We will allocate space in case nodatacow is not set,
- * so bail
- */
- if (check_nocow_nolock(BTRFS_I(inode), pos, &nocow_bytes)
- <= 0) {
- inode_unlock(inode);
- return -EAGAIN;
- }
- /*
- * There are holes in the range or parts of the range that must
- * be COWed (shared extents, RO block groups, etc), so just bail
- * out.
- */
- if (nocow_bytes < count) {
- inode_unlock(inode);
- return -EAGAIN;
- }
- }
-
- current->backing_dev_info = inode_to_bdi(inode);
- err = file_remove_privs(file);
- if (err) {
- inode_unlock(inode);
- goto out;
- }
/*
- * If BTRFS flips readonly due to some impossible error
- * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
- * although we have opened a file as writable, we have
- * to stop this write operation to ensure FS consistency.
+ * If the fs flips readonly due to some impossible error, although we
+ * have opened a file as writable, we have to stop this write operation
+ * to ensure consistency.
*/
- if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
- inode_unlock(inode);
- err = -EROFS;
- goto out;
- }
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
+ return -EROFS;
- /*
- * We reserve space for updating the inode when we reserve space for the
- * extent we are going to write, so we will enospc out there. We don't
- * need to start yet another transaction to update the inode as we will
- * update the inode when we finish writing whatever data we write.
- */
- update_time_for_write(inode);
-
- start_pos = round_down(pos, fs_info->sectorsize);
- oldsize = i_size_read(inode);
- if (start_pos > oldsize) {
- /* Expand hole size to cover write data, preventing empty gap */
- end_pos = round_up(pos + count,
- fs_info->sectorsize);
- err = btrfs_cont_expand(inode, oldsize, end_pos);
- if (err) {
- inode_unlock(inode);
- goto out;
- }
- if (start_pos > round_up(oldsize, fs_info->sectorsize))
- clean_page = 1;
- }
+ if (!(iocb->ki_flags & IOCB_DIRECT) &&
+ (iocb->ki_flags & IOCB_NOWAIT))
+ return -EOPNOTSUPP;
if (sync)
atomic_inc(&BTRFS_I(inode)->sync_writers);
- if (iocb->ki_flags & IOCB_DIRECT) {
- /*
- * 1. We must always clear IOCB_DSYNC in order to not deadlock
- * in iomap, as it calls generic_write_sync() in this case.
- * 2. If we are async, we can call iomap_dio_complete() either
- * in
- *
- * 2.1. A worker thread from the last bio completed. In this
- * case we need to mark the btrfs_dio_data that it is
- * async in order to call generic_write_sync() properly.
- * This is handled by setting BTRFS_DIO_SYNC_STUB in the
- * current->journal_info.
- * 2.2 The submitter context, because all IO completed
- * before we exited iomap_dio_rw(). In this case we can
- * just re-set the IOCB_DSYNC on the iocb and we'll do
- * the sync below. If our ->end_io() gets called and
- * current->journal_info is set, then we know we're in
- * our current context and we will clear
- * current->journal_info to indicate that we need to
- * sync below.
- */
- if (sync) {
- ASSERT(current->journal_info == NULL);
- iocb->ki_flags &= ~IOCB_DSYNC;
- current->journal_info = BTRFS_DIO_SYNC_STUB;
- }
- num_written = __btrfs_direct_write(iocb, from);
-
- /*
- * As stated above, we cleared journal_info, so we need to do
- * the sync ourselves.
- */
- if (sync && current->journal_info == NULL)
- iocb->ki_flags |= IOCB_DSYNC;
- current->journal_info = NULL;
- } else {
+ if (iocb->ki_flags & IOCB_DIRECT)
+ num_written = btrfs_direct_write(iocb, from);
+ else
num_written = btrfs_buffered_write(iocb, from);
- if (num_written > 0)
- iocb->ki_pos = pos + num_written;
- if (clean_page)
- pagecache_isize_extended(inode, oldsize,
- i_size_read(inode));
- }
-
- inode_unlock(inode);
/*
* We also have to set last_sub_trans to the current log transid,
@@ -2019,9 +2036,9 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
if (sync)
atomic_dec(&BTRFS_I(inode)->sync_writers);
-out:
+
current->backing_dev_info = NULL;
- return num_written ? num_written : err;
+ return num_written;
}
int btrfs_release_file(struct inode *inode, struct file *filp)
@@ -2116,13 +2133,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
inode_lock(inode);
- /*
- * We take the dio_sem here because the tree log stuff can race with
- * lockless dio writes and get an extent map logged for an extent we
- * never waited on. We need it this high up for lockdep reasons.
- */
- down_write(&BTRFS_I(inode)->dio_sem);
-
atomic_inc(&root->log_batch);
/*
@@ -2153,7 +2163,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
*/
ret = start_ordered_ops(inode, start, end);
if (ret) {
- up_write(&BTRFS_I(inode)->dio_sem);
inode_unlock(inode);
goto out;
}
@@ -2250,7 +2259,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* file again, but that will end up using the synchronization
* inside btrfs_sync_log to keep things safe.
*/
- up_write(&BTRFS_I(inode)->dio_sem);
inode_unlock(inode);
if (ret != BTRFS_NO_LOG_SYNC) {
@@ -2281,7 +2289,6 @@ out:
out_release_extents:
btrfs_release_log_ctx_extents(&ctx);
- up_write(&BTRFS_I(inode)->dio_sem);
inode_unlock(inode);
goto out;
}
@@ -2443,13 +2450,13 @@ out:
* em->start + em->len > start)
* When a hole extent is found, return 1 and modify start/len.
*/
-static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
+static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_map *em;
int ret = 0;
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
+ em = btrfs_get_extent(inode, NULL, 0,
round_down(*start, fs_info->sectorsize),
round_up(*len, fs_info->sectorsize));
if (IS_ERR(em))
@@ -2509,13 +2516,14 @@ static int btrfs_punch_hole_lock_range(struct inode *inode,
}
static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
- struct inode *inode,
+ struct btrfs_inode *inode,
struct btrfs_path *path,
struct btrfs_replace_extent_info *extent_info,
- const u64 replace_len)
+ const u64 replace_len,
+ const u64 bytes_to_drop)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *root = inode->root;
struct btrfs_file_extent_item *extent;
struct extent_buffer *leaf;
struct btrfs_key key;
@@ -2527,10 +2535,12 @@ static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
return 0;
if (extent_info->disk_offset == 0 &&
- btrfs_fs_incompat(fs_info, NO_HOLES))
+ btrfs_fs_incompat(fs_info, NO_HOLES)) {
+ btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
return 0;
+ }
- key.objectid = btrfs_ino(BTRFS_I(inode));
+ key.objectid = btrfs_ino(inode);
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = extent_info->file_offset;
ret = btrfs_insert_empty_item(trans, root, path, &key,
@@ -2551,23 +2561,25 @@ static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
- ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode),
- extent_info->file_offset, replace_len);
+ ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
+ replace_len);
if (ret)
return ret;
/* If it's a hole, nothing more needs to be done. */
- if (extent_info->disk_offset == 0)
+ if (extent_info->disk_offset == 0) {
+ btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
return 0;
+ }
- inode_add_bytes(inode, replace_len);
+ btrfs_update_inode_bytes(inode, replace_len, bytes_to_drop);
if (extent_info->is_new_extent && extent_info->insertions == 0) {
key.objectid = extent_info->disk_offset;
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = extent_info->disk_len;
ret = btrfs_alloc_reserved_file_extent(trans, root,
- btrfs_ino(BTRFS_I(inode)),
+ btrfs_ino(inode),
extent_info->file_offset,
extent_info->qgroup_reserved,
&key);
@@ -2579,7 +2591,7 @@ static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
extent_info->disk_len, 0);
ref_offset = extent_info->file_offset - extent_info->data_offset;
btrfs_init_data_ref(&ref, root->root_key.objectid,
- btrfs_ino(BTRFS_I(inode)), ref_offset);
+ btrfs_ino(inode), ref_offset);
ret = btrfs_inc_extent_ref(trans, &ref);
}
@@ -2602,6 +2614,7 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
struct btrfs_replace_extent_info *extent_info,
struct btrfs_trans_handle **trans_out)
{
+ struct btrfs_drop_extents_args drop_args = { 0 };
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
u64 ino_size = round_up(inode->i_size, fs_info->sectorsize);
@@ -2610,7 +2623,6 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
struct btrfs_block_rsv *rsv;
unsigned int rsv_count;
u64 cur_offset;
- u64 drop_end;
u64 len = end - start;
int ret = 0;
@@ -2649,10 +2661,16 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
trans->block_rsv = rsv;
cur_offset = start;
+ drop_args.path = path;
+ drop_args.end = end + 1;
+ drop_args.drop_cache = true;
while (cur_offset < end) {
- ret = __btrfs_drop_extents(trans, root, BTRFS_I(inode), path,
- cur_offset, end + 1, &drop_end,
- 1, 0, 0, NULL);
+ drop_args.start = cur_offset;
+ ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &drop_args);
+ /* If we are punching a hole, decrement the inode's byte count */
+ if (!extent_info)
+ btrfs_update_inode_bytes(BTRFS_I(inode), 0,
+ drop_args.bytes_found);
if (ret != -ENOSPC) {
/*
* When cloning we want to avoid transaction aborts when
@@ -2669,10 +2687,10 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
trans->block_rsv = &fs_info->trans_block_rsv;
- if (!extent_info && cur_offset < drop_end &&
+ if (!extent_info && cur_offset < drop_args.drop_end &&
cur_offset < ino_size) {
ret = fill_holes(trans, BTRFS_I(inode), path,
- cur_offset, drop_end);
+ cur_offset, drop_args.drop_end);
if (ret) {
/*
* If we failed then we didn't insert our hole
@@ -2683,7 +2701,7 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
btrfs_abort_transaction(trans, ret);
break;
}
- } else if (!extent_info && cur_offset < drop_end) {
+ } else if (!extent_info && cur_offset < drop_args.drop_end) {
/*
* We are past the i_size here, but since we didn't
* insert holes we need to clear the mapped area so we
@@ -2691,7 +2709,8 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
* file extent is inserted here.
*/
ret = btrfs_inode_clear_file_extent_range(BTRFS_I(inode),
- cur_offset, drop_end - cur_offset);
+ cur_offset,
+ drop_args.drop_end - cur_offset);
if (ret) {
/*
* We couldn't clear our area, so we could
@@ -2703,11 +2722,14 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
}
}
- if (extent_info && drop_end > extent_info->file_offset) {
- u64 replace_len = drop_end - extent_info->file_offset;
+ if (extent_info &&
+ drop_args.drop_end > extent_info->file_offset) {
+ u64 replace_len = drop_args.drop_end -
+ extent_info->file_offset;
- ret = btrfs_insert_replace_extent(trans, inode, path,
- extent_info, replace_len);
+ ret = btrfs_insert_replace_extent(trans, BTRFS_I(inode),
+ path, extent_info, replace_len,
+ drop_args.bytes_found);
if (ret) {
btrfs_abort_transaction(trans, ret);
break;
@@ -2717,9 +2739,9 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
extent_info->file_offset += replace_len;
}
- cur_offset = drop_end;
+ cur_offset = drop_args.drop_end;
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
if (ret)
break;
@@ -2739,7 +2761,8 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
trans->block_rsv = rsv;
if (!extent_info) {
- ret = find_first_non_hole(inode, &cur_offset, &len);
+ ret = find_first_non_hole(BTRFS_I(inode), &cur_offset,
+ &len);
if (unlikely(ret < 0))
break;
if (ret && !len) {
@@ -2776,25 +2799,26 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
* will not record the existence of the hole region
* [existing_hole_start, lockend].
*/
- if (drop_end <= end)
- drop_end = end + 1;
+ if (drop_args.drop_end <= end)
+ drop_args.drop_end = end + 1;
/*
* Don't insert a file hole extent item if it's for a range beyond eof
* (because it's useless) or if it represents a zero-length range (when
* cur_offset == drop_args.drop_end).
*/
- if (!extent_info && cur_offset < ino_size && cur_offset < drop_end) {
+ if (!extent_info && cur_offset < ino_size &&
+ cur_offset < drop_args.drop_end) {
ret = fill_holes(trans, BTRFS_I(inode), path,
- cur_offset, drop_end);
+ cur_offset, drop_args.drop_end);
if (ret) {
/* Same comment as above. */
btrfs_abort_transaction(trans, ret);
goto out_trans;
}
- } else if (!extent_info && cur_offset < drop_end) {
+ } else if (!extent_info && cur_offset < drop_args.drop_end) {
/* See the comment in the loop above for the reasoning here. */
ret = btrfs_inode_clear_file_extent_range(BTRFS_I(inode),
- cur_offset, drop_end - cur_offset);
+ cur_offset, drop_args.drop_end - cur_offset);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out_trans;
@@ -2802,8 +2826,9 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
}
if (extent_info) {
- ret = btrfs_insert_replace_extent(trans, inode, path, extent_info,
- extent_info->data_len);
+ ret = btrfs_insert_replace_extent(trans, BTRFS_I(inode), path,
+ extent_info, extent_info->data_len,
+ drop_args.bytes_found);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out_trans;
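The hunks above fold __btrfs_drop_extents()'s long parameter list into a struct btrfs_drop_extents_args that carries the inputs in and the results out. A minimal sketch of the new calling convention, built only from the fields visible in this diff (any other field would be an assumption):

    struct btrfs_drop_extents_args args = { 0 };

    args.path = path;                   /* reuse the caller's path */
    args.start = file_offset;           /* first byte of the range to drop */
    args.end = file_offset + num_bytes; /* exclusive end of the range */
    args.drop_cache = true;             /* also drop cached extent maps */

    ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &args);
    /* On return, args.drop_end marks how far the drop got and
     * args.bytes_found reports how many extent bytes were dropped;
     * the loop above keeps using both even after -ENOSPC. */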
@@ -2849,7 +2874,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
inode_lock(inode);
ino_size = round_up(inode->i_size, fs_info->sectorsize);
- ret = find_first_non_hole(inode, &offset, &len);
+ ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
if (ret < 0)
goto out_only_mutex;
if (ret && !len) {
@@ -2874,7 +2899,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
if (same_block && len < fs_info->sectorsize) {
if (offset < ino_size) {
truncated_block = true;
- ret = btrfs_truncate_block(inode, offset, len, 0);
+ ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
+ 0);
} else {
ret = 0;
}
@@ -2884,7 +2910,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
/* zero back part of the first block */
if (offset < ino_size) {
truncated_block = true;
- ret = btrfs_truncate_block(inode, offset, 0, 0);
+ ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
if (ret) {
inode_unlock(inode);
return ret;
@@ -2899,7 +2925,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
/* after truncate page, check hole again */
len = offset + len - lockstart;
offset = lockstart;
- ret = find_first_non_hole(inode, &offset, &len);
+ ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
if (ret < 0)
goto out_only_mutex;
if (ret && !len) {
@@ -2913,14 +2939,14 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
tail_start = lockend + 1;
tail_len = offset + len - tail_start;
if (tail_len) {
- ret = find_first_non_hole(inode, &tail_start, &tail_len);
+ ret = find_first_non_hole(BTRFS_I(inode), &tail_start, &tail_len);
if (unlikely(ret < 0))
goto out_only_mutex;
if (!ret) {
/* zero the front end of the last page */
if (tail_start + tail_len < ino_size) {
truncated_block = true;
- ret = btrfs_truncate_block(inode,
+ ret = btrfs_truncate_block(BTRFS_I(inode),
tail_start + tail_len,
0, 1);
if (ret)
@@ -2954,7 +2980,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
ASSERT(trans != NULL);
inode_inc_iversion(inode);
inode->i_mtime = inode->i_ctime = current_time(inode);
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
updated_inode = true;
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
@@ -2981,7 +3007,7 @@ out_only_mutex:
} else {
int ret2;
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
ret2 = btrfs_end_transaction(trans);
if (!ret)
ret = ret2;
@@ -3049,8 +3075,8 @@ static int btrfs_fallocate_update_isize(struct inode *inode,
inode->i_ctime = current_time(inode);
i_size_write(inode, end);
- btrfs_inode_safe_disk_i_size_write(inode, 0);
- ret = btrfs_update_inode(trans, root, inode);
+ btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
ret2 = btrfs_end_transaction(trans);
return ret ? ret : ret2;
@@ -3162,7 +3188,8 @@ static int btrfs_zero_range(struct inode *inode,
}
if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
free_extent_map(em);
- ret = btrfs_truncate_block(inode, offset, len, 0);
+ ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
+ 0);
if (!ret)
ret = btrfs_fallocate_update_isize(inode,
offset + len,
@@ -3193,7 +3220,7 @@ static int btrfs_zero_range(struct inode *inode,
alloc_start = round_down(offset, sectorsize);
ret = 0;
} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
- ret = btrfs_truncate_block(inode, offset, 0, 0);
+ ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
if (ret)
goto out;
} else {
@@ -3210,7 +3237,8 @@ static int btrfs_zero_range(struct inode *inode,
alloc_end = round_up(offset + len, sectorsize);
ret = 0;
} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
- ret = btrfs_truncate_block(inode, offset + len, 0, 1);
+ ret = btrfs_truncate_block(BTRFS_I(inode), offset + len,
+ 0, 1);
if (ret)
goto out;
} else {
@@ -3280,6 +3308,10 @@ static long btrfs_fallocate(struct file *file, int mode,
int blocksize = btrfs_inode_sectorsize(BTRFS_I(inode));
int ret;
+ /* Do not allow fallocate in ZONED mode */
+ if (btrfs_is_zoned(btrfs_sb(inode->i_sb)))
+ return -EOPNOTSUPP;
+
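Refusing fallocate up front here is consistent with the zoned write model: data in a sequential-only zone must be written at the zone's write pointer, so space preallocated now could never be filled in place later. Failing the call is cheaper than failing the eventual write.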
alloc_start = round_down(offset, blocksize);
alloc_end = round_up(offset + len, blocksize);
cur_offset = alloc_start;
@@ -3304,7 +3336,7 @@ static long btrfs_fallocate(struct file *file, int mode,
return ret;
}
- inode_lock(inode);
+ btrfs_inode_lock(inode, 0);
if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
ret = inode_newsize_ok(inode, offset + len);
@@ -3320,7 +3352,7 @@ static long btrfs_fallocate(struct file *file, int mode,
* But that's a minor problem and won't do much harm BTW.
*/
if (alloc_start > inode->i_size) {
- ret = btrfs_cont_expand(inode, i_size_read(inode),
+ ret = btrfs_cont_expand(BTRFS_I(inode), i_size_read(inode),
alloc_start);
if (ret)
goto out;
@@ -3330,7 +3362,7 @@ static long btrfs_fallocate(struct file *file, int mode,
* need to zero out the end of the block if i_size lands in the
* middle of a block.
*/
- ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
+ ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
if (ret)
goto out;
}
@@ -3543,9 +3575,9 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
return generic_file_llseek(file, offset, whence);
case SEEK_DATA:
case SEEK_HOLE:
- inode_lock_shared(inode);
+ btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
offset = find_desired_extent(inode, offset, whence);
- inode_unlock_shared(inode);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
break;
}
@@ -3561,16 +3593,47 @@ static int btrfs_file_open(struct inode *inode, struct file *filp)
return generic_file_open(inode, filp);
}
+static int check_direct_read(struct btrfs_fs_info *fs_info,
+ const struct iov_iter *iter, loff_t offset)
+{
+ int ret;
+ int i, seg;
+
+ ret = check_direct_IO(fs_info, iter, offset);
+ if (ret < 0)
+ return ret;
+
+ if (!iter_is_iovec(iter))
+ return 0;
+
+ for (seg = 0; seg < iter->nr_segs; seg++)
+ for (i = seg + 1; i < iter->nr_segs; i++)
+ if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
+ return -EINVAL;
+ return 0;
+}
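A userspace view of the duplicate-segment check above, as a hedged sketch assuming only the behavior visible in this diff (btrfs_direct_read() returns 0 when the check fails, and the read_iter path below then falls back to buffered I/O):

    #include <sys/uio.h>
    #include <unistd.h>

    /* 'fd' is assumed to be a file on btrfs opened with O_DIRECT. */
    char buf[4096];
    struct iovec iov[2] = {
            { .iov_base = buf, .iov_len = sizeof(buf) },
            { .iov_base = buf, .iov_len = sizeof(buf) }, /* duplicate iov_base */
    };

    /* The aliased segments fail check_direct_read(), the direct path
     * reads 0 bytes, and the request is served by the buffered path. */
    ssize_t n = readv(fd, iov, 2);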
+
+static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ ssize_t ret;
+
+ if (check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos))
+ return 0;
+
+ btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
+ ret = iomap_dio_rw(iocb, to, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
+ is_sync_kiocb(iocb));
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+ return ret;
+}
+
static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
ssize_t ret = 0;
if (iocb->ki_flags & IOCB_DIRECT) {
- struct inode *inode = file_inode(iocb->ki_filp);
-
- inode_lock_shared(inode);
- ret = btrfs_direct_IO(iocb, to);
- inode_unlock_shared(inode);
+ ret = btrfs_direct_read(iocb, to);
if (ret < 0 || !iov_iter_count(to) ||
iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp)))
return ret;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index af0013d3df63..4d8897879c9c 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -16,7 +16,6 @@
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
-#include "inode-map.h"
#include "volumes.h"
#include "space-info.h"
#include "delalloc-space.h"
@@ -33,16 +32,18 @@ struct btrfs_trim_range {
struct list_head list;
};
-static int count_bitmap_extents(struct btrfs_free_space_ctl *ctl,
- struct btrfs_free_space *bitmap_info);
static int link_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info);
-static int btrfs_wait_cache_io_root(struct btrfs_root *root,
- struct btrfs_trans_handle *trans,
- struct btrfs_io_ctl *io_ctl,
- struct btrfs_path *path);
+static int search_bitmap(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *bitmap_info, u64 *offset,
+ u64 *bytes, bool for_alloc);
+static void free_bitmap(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *bitmap_info);
+static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info, u64 offset,
+ u64 bytes);
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
struct btrfs_path *path,
@@ -141,17 +142,15 @@ static int __create_free_space_inode(struct btrfs_root *root,
struct btrfs_free_space_header *header;
struct btrfs_inode_item *inode_item;
struct extent_buffer *leaf;
- u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
+ /* We inline CRCs for the free disk space cache */
+ const u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC |
+ BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
int ret;
ret = btrfs_insert_empty_inode(trans, root, path, ino);
if (ret)
return ret;
- /* We inline crc's for the free disk space cache */
- if (ino != BTRFS_FREE_INO_OBJECTID)
- flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
-
leaf = path->nodes[0];
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
@@ -207,6 +206,65 @@ int create_free_space_inode(struct btrfs_trans_handle *trans,
ino, block_group->start);
}
+/*
+ * inode is an optional sink: if it is NULL, btrfs_remove_free_space_inode
+ * handles lookup, otherwise it takes ownership and iputs the inode.
+ * Don't reuse an inode pointer after passing it into this function.
+ */
+int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans,
+ struct inode *inode,
+ struct btrfs_block_group *block_group)
+{
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ int ret = 0;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ if (!inode)
+ inode = lookup_free_space_inode(block_group, path);
+ if (IS_ERR(inode)) {
+ if (PTR_ERR(inode) != -ENOENT)
+ ret = PTR_ERR(inode);
+ goto out;
+ }
+ ret = btrfs_orphan_add(trans, BTRFS_I(inode));
+ if (ret) {
+ btrfs_add_delayed_iput(inode);
+ goto out;
+ }
+ clear_nlink(inode);
+ /* One for the block groups ref */
+ spin_lock(&block_group->lock);
+ if (block_group->iref) {
+ block_group->iref = 0;
+ block_group->inode = NULL;
+ spin_unlock(&block_group->lock);
+ iput(inode);
+ } else {
+ spin_unlock(&block_group->lock);
+ }
+ /* One for the lookup ref */
+ btrfs_add_delayed_iput(inode);
+
+ key.objectid = BTRFS_FREE_SPACE_OBJECTID;
+ key.type = 0;
+ key.offset = block_group->start;
+ ret = btrfs_search_slot(trans, trans->fs_info->tree_root, &key, path,
+ -1, 1);
+ if (ret) {
+ if (ret > 0)
+ ret = 0;
+ goto out;
+ }
+ ret = btrfs_del_item(trans, trans->fs_info->tree_root, path);
+out:
+ btrfs_free_path(path);
+ return ret;
+}
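Caller's-eye sketch of the ownership contract documented above (both call shapes are illustrative, not from this patch):

    /* Pass an inode you already hold: the function owns it from here on,
     * including on error, so 'inode' must not be touched again. */
    ret = btrfs_remove_free_space_inode(trans, inode, block_group);

    /* Or pass NULL and let the function do the lookup itself. */
    ret = btrfs_remove_free_space_inode(trans, NULL, block_group);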
+
int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv)
{
@@ -267,12 +325,12 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
* We skip the throttling logic for free space cache inodes, so we don't
* need to check for -EAGAIN.
*/
- ret = btrfs_truncate_inode_items(trans, root, inode,
+ ret = btrfs_truncate_inode_items(trans, root, BTRFS_I(inode),
0, BTRFS_EXTENT_DATA_KEY);
if (ret)
goto fail;
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
fail:
if (locked)
@@ -304,16 +362,11 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
int write)
{
int num_pages;
- int check_crcs = 0;
num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
- if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FREE_INO_OBJECTID)
- check_crcs = 1;
-
/* Make sure we can fit our crcs and generation into the first page */
- if (write && check_crcs &&
- (num_pages * sizeof(u32) + sizeof(u64)) > PAGE_SIZE)
+ if (write && (num_pages * sizeof(u32) + sizeof(u64)) > PAGE_SIZE)
return -ENOSPC;
memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));
@@ -324,7 +377,6 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
io_ctl->num_pages = num_pages;
io_ctl->fs_info = btrfs_sb(inode->i_sb);
- io_ctl->check_crcs = check_crcs;
io_ctl->inode = inode;
return 0;
@@ -419,13 +471,8 @@ static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
* Skip the csum areas. CRCs are always inlined now, so the first page
* starts with one u32 CRC per page followed by the generation.
*/
- if (io_ctl->check_crcs) {
- io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
- io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
- } else {
- io_ctl->cur += sizeof(u64);
- io_ctl->size -= sizeof(u64) * 2;
- }
+ io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
+ io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
put_unaligned_le64(generation, io_ctl->cur);
io_ctl->cur += sizeof(u64);
@@ -439,14 +486,8 @@ static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
* Skip the crc area. CRCs are always inlined now, so the first page
* starts with one u32 CRC per page followed by the generation.
*/
- if (io_ctl->check_crcs) {
- io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
- io_ctl->size -= sizeof(u64) +
- (sizeof(u32) * io_ctl->num_pages);
- } else {
- io_ctl->cur += sizeof(u64);
- io_ctl->size -= sizeof(u64) * 2;
- }
+ io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
+ io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
cache_gen = get_unaligned_le64(io_ctl->cur);
if (cache_gen != generation) {
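With the check_crcs branches gone, the layout of the cache file's first page is fixed; reading it off the two hunks above:

    /*
     * page 0: [u32 crc of page 0][u32 crc of page 1] ... [u32 crc of page N-1]
     *         [le64 generation][entries ...]
     *
     * io_ctl->cur skips num_pages * sizeof(u32) of CRCs, then reads or
     * writes the le64 generation; entries follow.
     */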
@@ -466,11 +507,6 @@ static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
u32 crc = ~(u32)0;
unsigned offset = 0;
- if (!io_ctl->check_crcs) {
- io_ctl_unmap_page(io_ctl);
- return;
- }
-
if (index == 0)
offset = sizeof(u32) * io_ctl->num_pages;
@@ -488,11 +524,6 @@ static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
u32 crc = ~(u32)0;
unsigned offset = 0;
- if (!io_ctl->check_crcs) {
- io_ctl_map_page(io_ctl, 0);
- return 0;
- }
-
if (index == 0)
offset = sizeof(u32) * io_ctl->num_pages;
@@ -625,42 +656,42 @@ static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
return 0;
}
-/*
- * Since we attach pinned extents after the fact we can have contiguous sections
- * of free space that are split up in entries. This poses a problem with the
- * tree logging stuff since it could have allocated across what appears to be 2
- * entries since we would have merged the entries when adding the pinned extents
- * back to the free space cache. So run through the space cache that we just
- * loaded and merge contiguous entries. This will make the log replay stuff not
- * blow up and it will make for nicer allocator behavior.
- */
-static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
+static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
- struct btrfs_free_space *e, *prev = NULL;
- struct rb_node *n;
+ struct btrfs_block_group *block_group = ctl->private;
+ u64 max_bytes;
+ u64 bitmap_bytes;
+ u64 extent_bytes;
+ u64 size = block_group->length;
+ u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
+ u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
-again:
- spin_lock(&ctl->tree_lock);
- for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
- e = rb_entry(n, struct btrfs_free_space, offset_index);
- if (!prev)
- goto next;
- if (e->bitmap || prev->bitmap)
- goto next;
- if (prev->offset + prev->bytes == e->offset) {
- unlink_free_space(ctl, prev);
- unlink_free_space(ctl, e);
- prev->bytes += e->bytes;
- kmem_cache_free(btrfs_free_space_cachep, e);
- link_free_space(ctl, prev);
- prev = NULL;
- spin_unlock(&ctl->tree_lock);
- goto again;
- }
-next:
- prev = e;
- }
- spin_unlock(&ctl->tree_lock);
+ max_bitmaps = max_t(u64, max_bitmaps, 1);
+
+ ASSERT(ctl->total_bitmaps <= max_bitmaps);
+
+ /*
+ * We are trying to keep the total amount of memory used per 1GiB of
+ * space to be MAX_CACHE_BYTES_PER_GIG. However, with a reclamation
+ * mechanism of pulling extents >= FORCE_EXTENT_THRESHOLD out of
+ * bitmaps, we may end up using more memory than this.
+ */
+ if (size < SZ_1G)
+ max_bytes = MAX_CACHE_BYTES_PER_GIG;
+ else
+ max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G);
+
+ bitmap_bytes = ctl->total_bitmaps * ctl->unit;
+
+ /*
+ * We want the extent entry threshold to be whatever is left over after
+ * the bitmaps, capped at half of the max bytes we can use.
+ */
+ extent_bytes = max_bytes - bitmap_bytes;
+ extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);
+
+ ctl->extents_thresh =
+ div_u64(extent_bytes, sizeof(struct btrfs_free_space));
}
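A worked example of the math above, assuming 4KiB sectors and 4KiB pages (so BITS_PER_BITMAP = PAGE_SIZE * 8 = 32768) and writing M for MAX_CACHE_BYTES_PER_GIG:

    /*
     * One bitmap covers bytes_per_bg = 32768 * 4096 = 128MiB, so a 1GiB
     * block group has max_bitmaps = 8. With two bitmaps in use,
     * bitmap_bytes = 2 * 4096 = 8KiB, and:
     *
     *   extent_bytes   = min(M - 8KiB, M / 2)
     *   extents_thresh = extent_bytes / sizeof(struct btrfs_free_space)
     */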
static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
@@ -753,16 +784,6 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
goto free_cache;
}
- /*
- * Sync discard ensures that the free space cache is always
- * trimmed. So when reading this in, the state should reflect
- * that. We also do this for async as a stop gap for lack of
- * persistence.
- */
- if (btrfs_test_opt(fs_info, DISCARD_SYNC) ||
- btrfs_test_opt(fs_info, DISCARD_ASYNC))
- e->trim_state = BTRFS_TRIM_STATE_TRIMMED;
-
if (!e->bytes) {
kmem_cache_free(btrfs_free_space_cachep, e);
goto free_cache;
@@ -791,7 +812,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
spin_lock(&ctl->tree_lock);
ret = link_free_space(ctl, e);
ctl->total_bitmaps++;
- ctl->op->recalc_thresholds(ctl);
+ recalculate_thresholds(ctl);
spin_unlock(&ctl->tree_lock);
if (ret) {
btrfs_err(fs_info,
@@ -816,19 +837,11 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
ret = io_ctl_read_bitmap(&io_ctl, e);
if (ret)
goto free_cache;
- e->bitmap_extents = count_bitmap_extents(ctl, e);
- if (!btrfs_free_space_trimmed(e)) {
- ctl->discardable_extents[BTRFS_STAT_CURR] +=
- e->bitmap_extents;
- ctl->discardable_bytes[BTRFS_STAT_CURR] += e->bytes;
- }
}
io_ctl_drop_pages(&io_ctl);
- merge_space_tree(ctl);
ret = 1;
out:
- btrfs_discard_update_discardable(ctl->private, ctl);
io_ctl_free(&io_ctl);
return ret;
free_cache:
@@ -837,10 +850,46 @@ free_cache:
goto out;
}
+static int copy_free_space_cache(struct btrfs_block_group *block_group,
+ struct btrfs_free_space_ctl *ctl)
+{
+ struct btrfs_free_space *info;
+ struct rb_node *n;
+ int ret = 0;
+
+ while (!ret && (n = rb_first(&ctl->free_space_offset)) != NULL) {
+ info = rb_entry(n, struct btrfs_free_space, offset_index);
+ if (!info->bitmap) {
+ unlink_free_space(ctl, info);
+ ret = btrfs_add_free_space(block_group, info->offset,
+ info->bytes);
+ kmem_cache_free(btrfs_free_space_cachep, info);
+ } else {
+ u64 offset = info->offset;
+ u64 bytes = ctl->unit;
+
+ while (search_bitmap(ctl, info, &offset, &bytes,
+ false) == 0) {
+ ret = btrfs_add_free_space(block_group, offset,
+ bytes);
+ if (ret)
+ break;
+ bitmap_clear_bits(ctl, info, offset, bytes);
+ offset = info->offset;
+ bytes = ctl->unit;
+ }
+ free_bitmap(ctl, info);
+ }
+ cond_resched();
+ }
+ return ret;
+}
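A note on the bitmap branch above: bitmap_clear_bits() removes each run as soon as it has been copied, so 'offset' is reset to the bitmap's base and 'bytes' back to a single unit before the next search; once no set run of at least ctl->unit remains, search_bitmap() returns nonzero and the drained bitmap is freed.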
+
int load_free_space_cache(struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+ struct btrfs_free_space_ctl tmp_ctl = {};
struct inode *inode;
struct btrfs_path *path;
int ret = 0;
@@ -848,6 +897,13 @@ int load_free_space_cache(struct btrfs_block_group *block_group)
u64 used = block_group->used;
/*
+ * Because we could potentially discard our loaded free space, we want
+ * to load everything into a temporary structure first, and then, if
+ * it's valid, copy it all into the actual free space ctl.
+ */
+ btrfs_init_free_space_ctl(block_group, &tmp_ctl);
+
+ /*
* If this block group has been marked to be cleared for one reason or
* another then we can't trust the on disk cache, so just return.
*/
@@ -898,19 +954,25 @@ int load_free_space_cache(struct btrfs_block_group *block_group)
}
spin_unlock(&block_group->lock);
- ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
+ ret = __load_free_space_cache(fs_info->tree_root, inode, &tmp_ctl,
path, block_group->start);
btrfs_free_path(path);
if (ret <= 0)
goto out;
- spin_lock(&ctl->tree_lock);
- matched = (ctl->free_space == (block_group->length - used -
- block_group->bytes_super));
- spin_unlock(&ctl->tree_lock);
+ matched = (tmp_ctl.free_space == (block_group->length - used -
+ block_group->bytes_super));
- if (!matched) {
- __btrfs_remove_free_space_cache(ctl);
+ if (matched) {
+ ret = copy_free_space_cache(block_group, &tmp_ctl);
+ /*
+ * ret == 1 means we successfully loaded the free space cache,
+ * so we need to re-set it here.
+ */
+ if (ret == 0)
+ ret = 1;
+ } else {
+ __btrfs_remove_free_space_cache(&tmp_ctl);
btrfs_warn(fs_info,
"block group %llu has wrong amount of free space",
block_group->start);
@@ -929,6 +991,9 @@ out:
block_group->start);
}
+ spin_lock(&ctl->tree_lock);
+ btrfs_discard_update_discardable(block_group);
+ spin_unlock(&ctl->tree_lock);
iput(inode);
return ret;
}
@@ -1191,7 +1256,7 @@ out:
"failed to write free space cache for block group %llu error %d",
block_group->start, ret);
}
- btrfs_update_inode(trans, root, inode);
+ btrfs_update_inode(trans, root, BTRFS_I(inode));
if (block_group) {
/* the dirty list is protected by the dirty_bgs_lock */
@@ -1220,14 +1285,6 @@ out:
}
-static int btrfs_wait_cache_io_root(struct btrfs_root *root,
- struct btrfs_trans_handle *trans,
- struct btrfs_io_ctl *io_ctl,
- struct btrfs_path *path)
-{
- return __btrfs_wait_cache_io(root, trans, NULL, io_ctl, path, 0);
-}
-
int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group,
struct btrfs_path *path)
@@ -1332,7 +1389,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
/* Everything is written out, now we dirty the pages in the file. */
ret = btrfs_dirty_pages(BTRFS_I(inode), io_ctl->pages,
io_ctl->num_pages, 0, i_size_read(inode),
- &cached_state);
+ &cached_state, false);
if (ret)
goto out_nospc;
@@ -1381,7 +1438,7 @@ out:
invalidate_inode_pages2(inode->i_mapping);
BTRFS_I(inode)->generation = 0;
}
- btrfs_update_inode(trans, root, inode);
+ btrfs_update_inode(trans, root, BTRFS_I(inode));
if (must_iput)
iput(inode);
return ret;
@@ -1672,44 +1729,6 @@ static int link_free_space(struct btrfs_free_space_ctl *ctl,
return ret;
}
-static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
-{
- struct btrfs_block_group *block_group = ctl->private;
- u64 max_bytes;
- u64 bitmap_bytes;
- u64 extent_bytes;
- u64 size = block_group->length;
- u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
- u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
-
- max_bitmaps = max_t(u64, max_bitmaps, 1);
-
- ASSERT(ctl->total_bitmaps <= max_bitmaps);
-
- /*
- * We are trying to keep the total amount of memory used per 1GiB of
- * space to be MAX_CACHE_BYTES_PER_GIG. However, with a reclamation
- * mechanism of pulling extents >= FORCE_EXTENT_THRESHOLD out of
- * bitmaps, we may end up using more memory than this.
- */
- if (size < SZ_1G)
- max_bytes = MAX_CACHE_BYTES_PER_GIG;
- else
- max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G);
-
- bitmap_bytes = ctl->total_bitmaps * ctl->unit;
-
- /*
- * we want the extent entry threshold to always be at most 1/2 the max
- * bytes we can have, or whatever is less than that.
- */
- extent_bytes = max_bytes - bitmap_bytes;
- extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);
-
- ctl->extents_thresh =
- div_u64(extent_bytes, sizeof(struct btrfs_free_space));
-}
-
static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info,
u64 offset, u64 bytes)
@@ -1912,29 +1931,6 @@ out:
return NULL;
}
-static int count_bitmap_extents(struct btrfs_free_space_ctl *ctl,
- struct btrfs_free_space *bitmap_info)
-{
- struct btrfs_block_group *block_group = ctl->private;
- u64 bytes = bitmap_info->bytes;
- unsigned int rs, re;
- int count = 0;
-
- if (!block_group || !bytes)
- return count;
-
- bitmap_for_each_set_region(bitmap_info->bitmap, rs, re, 0,
- BITS_PER_BITMAP) {
- bytes -= (rs - re) * ctl->unit;
- count++;
-
- if (!bytes)
- break;
- }
-
- return count;
-}
-
static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info, u64 offset)
{
@@ -1944,8 +1940,7 @@ static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
INIT_LIST_HEAD(&info->list);
link_free_space(ctl, info);
ctl->total_bitmaps++;
-
- ctl->op->recalc_thresholds(ctl);
+ recalculate_thresholds(ctl);
}
static void free_bitmap(struct btrfs_free_space_ctl *ctl,
@@ -1967,7 +1962,7 @@ static void free_bitmap(struct btrfs_free_space_ctl *ctl,
kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap);
kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
ctl->total_bitmaps--;
- ctl->op->recalc_thresholds(ctl);
+ recalculate_thresholds(ctl);
}
static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
@@ -2134,7 +2129,6 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
}
static const struct btrfs_free_space_op free_space_op = {
- .recalc_thresholds = recalculate_thresholds,
.use_bitmap = use_bitmap,
};
@@ -2508,7 +2502,7 @@ link:
if (ret)
kmem_cache_free(btrfs_free_space_cachep, info);
out:
- btrfs_discard_update_discardable(block_group, ctl);
+ btrfs_discard_update_discardable(block_group);
spin_unlock(&ctl->tree_lock);
if (ret) {
@@ -2643,7 +2637,7 @@ again:
goto again;
}
out_lock:
- btrfs_discard_update_discardable(block_group, ctl);
+ btrfs_discard_update_discardable(block_group);
spin_unlock(&ctl->tree_lock);
out:
return ret;
@@ -2674,10 +2668,10 @@ void btrfs_dump_free_space(struct btrfs_block_group *block_group,
"%d blocks of free space at or bigger than bytes is", count);
}
-void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group)
+void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group,
+ struct btrfs_free_space_ctl *ctl)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
- struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
spin_lock_init(&ctl->tree_lock);
ctl->unit = fs_info->sectorsize;
@@ -2779,7 +2773,7 @@ void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
spin_lock(&ctl->tree_lock);
__btrfs_remove_free_space_cache_locked(ctl);
if (ctl->private)
- btrfs_discard_update_discardable(ctl->private, ctl);
+ btrfs_discard_update_discardable(ctl->private);
spin_unlock(&ctl->tree_lock);
}
@@ -2801,7 +2795,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
cond_resched_lock(&ctl->tree_lock);
}
__btrfs_remove_free_space_cache_locked(ctl);
- btrfs_discard_update_discardable(block_group, ctl);
+ btrfs_discard_update_discardable(block_group);
spin_unlock(&ctl->tree_lock);
}
@@ -2885,7 +2879,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
link_free_space(ctl, entry);
}
out:
- btrfs_discard_update_discardable(block_group, ctl);
+ btrfs_discard_update_discardable(block_group);
spin_unlock(&ctl->tree_lock);
if (align_gap_len)
@@ -3054,7 +3048,7 @@ out:
kmem_cache_free(btrfs_free_space_bitmap_cachep,
entry->bitmap);
ctl->total_bitmaps--;
- ctl->op->recalc_thresholds(ctl);
+ recalculate_thresholds(ctl);
} else if (!btrfs_free_space_trimmed(entry)) {
ctl->discardable_extents[BTRFS_STAT_CURR]--;
}
@@ -3828,166 +3822,62 @@ int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
return ret;
}
-/*
- * Find the left-most item in the cache tree, and then return the
- * smallest inode number in the item.
- *
- * Note: the returned inode number may not be the smallest one in
- * the tree, if the left-most item is a bitmap.
- */
-u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
+bool btrfs_free_space_cache_v1_active(struct btrfs_fs_info *fs_info)
{
- struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
- struct btrfs_free_space *entry = NULL;
- u64 ino = 0;
-
- spin_lock(&ctl->tree_lock);
-
- if (RB_EMPTY_ROOT(&ctl->free_space_offset))
- goto out;
-
- entry = rb_entry(rb_first(&ctl->free_space_offset),
- struct btrfs_free_space, offset_index);
-
- if (!entry->bitmap) {
- ino = entry->offset;
-
- unlink_free_space(ctl, entry);
- entry->offset++;
- entry->bytes--;
- if (!entry->bytes)
- kmem_cache_free(btrfs_free_space_cachep, entry);
- else
- link_free_space(ctl, entry);
- } else {
- u64 offset = 0;
- u64 count = 1;
- int ret;
-
- ret = search_bitmap(ctl, entry, &offset, &count, true);
- /* Logic error; Should be empty if it can't find anything */
- ASSERT(!ret);
-
- ino = offset;
- bitmap_clear_bits(ctl, entry, offset, 1);
- if (entry->bytes == 0)
- free_bitmap(ctl, entry);
- }
-out:
- spin_unlock(&ctl->tree_lock);
-
- return ino;
+ return btrfs_super_cache_generation(fs_info->super_copy);
}
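Note the implicit conversion: any non-zero cache_generation recorded in the super block means the v1 cache is in use, so the u64 generation collapses to a bool at the return.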
-struct inode *lookup_free_ino_inode(struct btrfs_root *root,
- struct btrfs_path *path)
+static int cleanup_free_space_cache_v1(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans)
{
- struct inode *inode = NULL;
-
- spin_lock(&root->ino_cache_lock);
- if (root->ino_cache_inode)
- inode = igrab(root->ino_cache_inode);
- spin_unlock(&root->ino_cache_lock);
- if (inode)
- return inode;
-
- inode = __lookup_free_space_inode(root, path, 0);
- if (IS_ERR(inode))
- return inode;
-
- spin_lock(&root->ino_cache_lock);
- if (!btrfs_fs_closing(root->fs_info))
- root->ino_cache_inode = igrab(inode);
- spin_unlock(&root->ino_cache_lock);
-
- return inode;
-}
-
-int create_free_ino_inode(struct btrfs_root *root,
- struct btrfs_trans_handle *trans,
- struct btrfs_path *path)
-{
- return __create_free_space_inode(root, trans, path,
- BTRFS_FREE_INO_OBJECTID, 0);
-}
-
-int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
-{
- struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
- struct btrfs_path *path;
- struct inode *inode;
- int ret = 0;
- u64 root_gen = btrfs_root_generation(&root->root_item);
-
- if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
- return 0;
-
- /*
- * If we're unmounting then just return, since this does a search on the
- * normal root and not the commit root and we could deadlock.
- */
- if (btrfs_fs_closing(fs_info))
- return 0;
-
- path = btrfs_alloc_path();
- if (!path)
- return 0;
-
- inode = lookup_free_ino_inode(root, path);
- if (IS_ERR(inode))
- goto out;
-
- if (root_gen != BTRFS_I(inode)->generation)
- goto out_put;
+ struct btrfs_block_group *block_group;
+ struct rb_node *node;
+ int ret;
- ret = __load_free_space_cache(root, inode, ctl, path, 0);
+ btrfs_info(fs_info, "cleaning free space cache v1");
- if (ret < 0)
- btrfs_err(fs_info,
- "failed to load free ino cache for root %llu",
- root->root_key.objectid);
-out_put:
- iput(inode);
+ node = rb_first(&fs_info->block_group_cache_tree);
+ while (node) {
+ block_group = rb_entry(node, struct btrfs_block_group, cache_node);
+ ret = btrfs_remove_free_space_inode(trans, NULL, block_group);
+ if (ret)
+ goto out;
+ node = rb_next(node);
+ }
out:
- btrfs_free_path(path);
return ret;
}
-int btrfs_write_out_ino_cache(struct btrfs_root *root,
- struct btrfs_trans_handle *trans,
- struct btrfs_path *path,
- struct inode *inode)
+int btrfs_set_free_space_cache_v1_active(struct btrfs_fs_info *fs_info, bool active)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ struct btrfs_trans_handle *trans;
int ret;
- struct btrfs_io_ctl io_ctl;
- bool release_metadata = true;
- if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
- return 0;
+ /*
+ * update_super_roots will appropriately set or unset
+ * super_copy->cache_generation based on SPACE_CACHE and
+ * BTRFS_FS_CLEANUP_SPACE_CACHE_V1. For this reason, we need a
+ * transaction commit whether we are enabling space cache v1 and don't
+ * have any other work to do, or are disabling it and removing free
+ * space inodes.
+ */
+ trans = btrfs_start_transaction(fs_info->tree_root, 0);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
- memset(&io_ctl, 0, sizeof(io_ctl));
- ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl, trans);
- if (!ret) {
- /*
- * At this point writepages() didn't error out, so our metadata
- * reservation is released when the writeback finishes, at
- * inode.c:btrfs_finish_ordered_io(), regardless of it finishing
- * with or without an error.
- */
- release_metadata = false;
- ret = btrfs_wait_cache_io_root(root, trans, &io_ctl, path);
+ if (!active) {
+ set_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags);
+ ret = cleanup_free_space_cache_v1(fs_info, trans);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
+ goto out;
+ }
}
- if (ret) {
- if (release_metadata)
- btrfs_delalloc_release_metadata(BTRFS_I(inode),
- inode->i_size, true);
- btrfs_debug(fs_info,
- "failed to write free ino cache for root %llu error %d",
- root->root_key.objectid, ret);
- }
+ ret = btrfs_commit_transaction(trans);
+out:
+ clear_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags);
return ret;
}
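Sketch of the expected call site on remount (the surrounding names are hypothetical; the function and its semantics are the ones introduced above):

    /* Turning space_cache v1 off removes every free space inode and
     * clears super_copy->cache_generation in one committed transaction. */
    if (v1_was_active && !v1_wanted)
            ret = btrfs_set_free_space_cache_v1_active(fs_info, false);
    else if (!v1_was_active && v1_wanted)
            ret = btrfs_set_free_space_cache_v1_active(fs_info, true);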
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index e3d5e0ad8f8e..ecb09a02d544 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -60,7 +60,6 @@ struct btrfs_free_space_ctl {
};
struct btrfs_free_space_op {
- void (*recalc_thresholds)(struct btrfs_free_space_ctl *ctl);
bool (*use_bitmap)(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info);
};
@@ -76,7 +75,6 @@ struct btrfs_io_ctl {
int num_pages;
int entries;
int bitmaps;
- unsigned check_crcs:1;
};
struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
@@ -84,6 +82,9 @@ struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
int create_free_space_inode(struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group,
struct btrfs_path *path);
+int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans,
+ struct inode *inode,
+ struct btrfs_block_group *block_group);
int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
@@ -97,19 +98,9 @@ int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group,
struct btrfs_path *path);
-struct inode *lookup_free_ino_inode(struct btrfs_root *root,
- struct btrfs_path *path);
-int create_free_ino_inode(struct btrfs_root *root,
- struct btrfs_trans_handle *trans,
- struct btrfs_path *path);
-int load_free_ino_cache(struct btrfs_fs_info *fs_info,
- struct btrfs_root *root);
-int btrfs_write_out_ino_cache(struct btrfs_root *root,
- struct btrfs_trans_handle *trans,
- struct btrfs_path *path,
- struct inode *inode);
-void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group);
+void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group,
+ struct btrfs_free_space_ctl *ctl);
int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_free_space_ctl *ctl,
u64 bytenr, u64 size,
@@ -126,7 +117,6 @@ bool btrfs_is_free_space_trimmed(struct btrfs_block_group *block_group);
u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
u64 offset, u64 bytes, u64 empty_size,
u64 *max_extent_size);
-u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
void btrfs_dump_free_space(struct btrfs_block_group *block_group,
u64 bytes);
int btrfs_find_space_cluster(struct btrfs_block_group *block_group,
@@ -148,6 +138,8 @@ int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
u64 *trimmed, u64 start, u64 end, u64 minlen,
u64 maxlen, bool async);
+bool btrfs_free_space_cache_v1_active(struct btrfs_fs_info *fs_info);
+int btrfs_set_free_space_cache_v1_active(struct btrfs_fs_info *fs_info, bool active);
/* Support functions for running our sanity tests */
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int test_add_free_space_entry(struct btrfs_block_group *cache,
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index 6b9faf3b0e96..e33a65bd9a0c 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -136,9 +136,10 @@ static int btrfs_search_prev_slot(struct btrfs_trans_handle *trans,
return 0;
}
-static inline u32 free_space_bitmap_size(u64 size, u32 sectorsize)
+static inline u32 free_space_bitmap_size(const struct btrfs_fs_info *fs_info,
+ u64 size)
{
- return DIV_ROUND_UP((u32)div_u64(size, sectorsize), BITS_PER_BYTE);
+ return DIV_ROUND_UP(size >> fs_info->sectorsize_bits, BITS_PER_BYTE);
}
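The shift is a drop-in replacement for the old div_u64() because sectorsize is always a power of two; a quick worked example:

    /*
     * size = 128MiB, sectorsize_bits = 12 (4KiB sectors):
     *   128MiB >> 12           = 32768 sectors = 32768 bits
     *   DIV_ROUND_UP(32768, 8) = 4096 bytes of bitmap payload
     */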
static unsigned long *alloc_bitmap(u32 bitmap_size)
@@ -200,8 +201,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
int done = 0, nr;
int ret;
- bitmap_size = free_space_bitmap_size(block_group->length,
- fs_info->sectorsize);
+ bitmap_size = free_space_bitmap_size(fs_info, block_group->length);
bitmap = alloc_bitmap(bitmap_size);
if (!bitmap) {
ret = -ENOMEM;
@@ -290,8 +290,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
u32 data_size;
extent_size = min(end - i, bitmap_range);
- data_size = free_space_bitmap_size(extent_size,
- fs_info->sectorsize);
+ data_size = free_space_bitmap_size(fs_info, extent_size);
key.objectid = i;
key.type = BTRFS_FREE_SPACE_BITMAP_KEY;
@@ -339,8 +338,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
int done = 0, nr;
int ret;
- bitmap_size = free_space_bitmap_size(block_group->length,
- fs_info->sectorsize);
+ bitmap_size = free_space_bitmap_size(fs_info, block_group->length);
bitmap = alloc_bitmap(bitmap_size);
if (!bitmap) {
ret = -ENOMEM;
@@ -383,8 +381,8 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
fs_info->sectorsize *
BITS_PER_BYTE);
bitmap_cursor = ((char *)bitmap) + bitmap_pos;
- data_size = free_space_bitmap_size(found_key.offset,
- fs_info->sectorsize);
+ data_size = free_space_bitmap_size(fs_info,
+ found_key.offset);
ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1);
read_extent_buffer(leaf, bitmap_cursor, ptr,
@@ -416,7 +414,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
- nrbits = div_u64(block_group->length, block_group->fs_info->sectorsize);
+ nrbits = block_group->length >> block_group->fs_info->sectorsize_bits;
start_bit = find_next_bit_le(bitmap, nrbits, 0);
while (start_bit < nrbits) {
@@ -540,8 +538,8 @@ static void free_space_set_bits(struct btrfs_block_group *block_group,
end = found_end;
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
- first = div_u64(*start - found_start, fs_info->sectorsize);
- last = div_u64(end - found_start, fs_info->sectorsize);
+ first = (*start - found_start) >> fs_info->sectorsize_bits;
+ last = (end - found_start) >> fs_info->sectorsize_bits;
if (bit)
extent_buffer_bitmap_set(leaf, ptr, first, last - first);
else
@@ -1195,8 +1193,6 @@ static int clear_free_space_tree(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- path->leave_spinning = 1;
-
key.objectid = 0;
key.type = 0;
key.offset = 0;
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index 668701832845..37f36ffdaf6b 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -119,8 +119,6 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- path->leave_spinning = 1;
-
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0)
ret = -ENOENT;
@@ -193,8 +191,6 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- path->leave_spinning = 1;
-
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0) {
ret = -ENOENT;
@@ -270,7 +266,6 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, root, path, &key,
ins_len);
if (ret == -EEXIST) {
@@ -327,7 +322,6 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- path->leave_spinning = 1;
path->skip_release_on_error = 1;
ret = btrfs_insert_empty_item(trans, root, path, &key,
ins_len);
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
deleted file mode 100644
index 76d2e43817ea..000000000000
--- a/fs/btrfs/inode-map.c
+++ /dev/null
@@ -1,582 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2007 Oracle. All rights reserved.
- */
-
-#include <linux/kthread.h>
-#include <linux/pagemap.h>
-
-#include "ctree.h"
-#include "disk-io.h"
-#include "free-space-cache.h"
-#include "inode-map.h"
-#include "transaction.h"
-#include "delalloc-space.h"
-
-static void fail_caching_thread(struct btrfs_root *root)
-{
- struct btrfs_fs_info *fs_info = root->fs_info;
-
- btrfs_warn(fs_info, "failed to start inode caching task");
- btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE,
- "disabling inode map caching");
- spin_lock(&root->ino_cache_lock);
- root->ino_cache_state = BTRFS_CACHE_ERROR;
- spin_unlock(&root->ino_cache_lock);
- wake_up(&root->ino_cache_wait);
-}
-
-static int caching_kthread(void *data)
-{
- struct btrfs_root *root = data;
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
- struct btrfs_key key;
- struct btrfs_path *path;
- struct extent_buffer *leaf;
- u64 last = (u64)-1;
- int slot;
- int ret;
-
- if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
- return 0;
-
- path = btrfs_alloc_path();
- if (!path) {
- fail_caching_thread(root);
- return -ENOMEM;
- }
-
- /* Since the commit root is read-only, we can safely skip locking. */
- path->skip_locking = 1;
- path->search_commit_root = 1;
- path->reada = READA_FORWARD;
-
- key.objectid = BTRFS_FIRST_FREE_OBJECTID;
- key.offset = 0;
- key.type = BTRFS_INODE_ITEM_KEY;
-again:
- /* need to make sure the commit_root doesn't disappear */
- down_read(&fs_info->commit_root_sem);
-
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
-
- while (1) {
- if (btrfs_fs_closing(fs_info))
- goto out;
-
- leaf = path->nodes[0];
- slot = path->slots[0];
- if (slot >= btrfs_header_nritems(leaf)) {
- ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- goto out;
- else if (ret > 0)
- break;
-
- if (need_resched() ||
- btrfs_transaction_in_commit(fs_info)) {
- leaf = path->nodes[0];
-
- if (WARN_ON(btrfs_header_nritems(leaf) == 0))
- break;
-
- /*
- * Save the key so we can advance forward
- * in the next search.
- */
- btrfs_item_key_to_cpu(leaf, &key, 0);
- btrfs_release_path(path);
- root->ino_cache_progress = last;
- up_read(&fs_info->commit_root_sem);
- schedule_timeout(1);
- goto again;
- } else
- continue;
- }
-
- btrfs_item_key_to_cpu(leaf, &key, slot);
-
- if (key.type != BTRFS_INODE_ITEM_KEY)
- goto next;
-
- if (key.objectid >= root->highest_objectid)
- break;
-
- if (last != (u64)-1 && last + 1 != key.objectid) {
- __btrfs_add_free_space(fs_info, ctl, last + 1,
- key.objectid - last - 1, 0);
- wake_up(&root->ino_cache_wait);
- }
-
- last = key.objectid;
-next:
- path->slots[0]++;
- }
-
- if (last < root->highest_objectid - 1) {
- __btrfs_add_free_space(fs_info, ctl, last + 1,
- root->highest_objectid - last - 1, 0);
- }
-
- spin_lock(&root->ino_cache_lock);
- root->ino_cache_state = BTRFS_CACHE_FINISHED;
- spin_unlock(&root->ino_cache_lock);
-
- root->ino_cache_progress = (u64)-1;
- btrfs_unpin_free_ino(root);
-out:
- wake_up(&root->ino_cache_wait);
- up_read(&fs_info->commit_root_sem);
-
- btrfs_free_path(path);
-
- return ret;
-}
-
-static void start_caching(struct btrfs_root *root)
-{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
- struct task_struct *tsk;
- int ret;
- u64 objectid;
-
- if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
- return;
-
- spin_lock(&root->ino_cache_lock);
- if (root->ino_cache_state != BTRFS_CACHE_NO) {
- spin_unlock(&root->ino_cache_lock);
- return;
- }
-
- root->ino_cache_state = BTRFS_CACHE_STARTED;
- spin_unlock(&root->ino_cache_lock);
-
- ret = load_free_ino_cache(fs_info, root);
- if (ret == 1) {
- spin_lock(&root->ino_cache_lock);
- root->ino_cache_state = BTRFS_CACHE_FINISHED;
- spin_unlock(&root->ino_cache_lock);
- wake_up(&root->ino_cache_wait);
- return;
- }
-
- /*
- * It can be quite time-consuming to fill the cache by searching
- * through the extent tree, and this can keep ino allocation path
- * waiting. Therefore at start we quickly find out the highest
- * inode number and we know we can use inode numbers which fall in
- * [highest_ino + 1, BTRFS_LAST_FREE_OBJECTID].
- */
- ret = btrfs_find_free_objectid(root, &objectid);
- if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
- __btrfs_add_free_space(fs_info, ctl, objectid,
- BTRFS_LAST_FREE_OBJECTID - objectid + 1,
- 0);
- wake_up(&root->ino_cache_wait);
- }
-
- tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
- root->root_key.objectid);
- if (IS_ERR(tsk))
- fail_caching_thread(root);
-}
-
-int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
-{
- if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
- return btrfs_find_free_objectid(root, objectid);
-
-again:
- *objectid = btrfs_find_ino_for_alloc(root);
-
- if (*objectid != 0)
- return 0;
-
- start_caching(root);
-
- wait_event(root->ino_cache_wait,
- root->ino_cache_state == BTRFS_CACHE_FINISHED ||
- root->ino_cache_state == BTRFS_CACHE_ERROR ||
- root->free_ino_ctl->free_space > 0);
-
- if (root->ino_cache_state == BTRFS_CACHE_FINISHED &&
- root->free_ino_ctl->free_space == 0)
- return -ENOSPC;
- else if (root->ino_cache_state == BTRFS_CACHE_ERROR)
- return btrfs_find_free_objectid(root, objectid);
- else
- goto again;
-}
-
-void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
-{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
-
- if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
- return;
-again:
- if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
- __btrfs_add_free_space(fs_info, pinned, objectid, 1, 0);
- } else {
- down_write(&fs_info->commit_root_sem);
- spin_lock(&root->ino_cache_lock);
- if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
- spin_unlock(&root->ino_cache_lock);
- up_write(&fs_info->commit_root_sem);
- goto again;
- }
- spin_unlock(&root->ino_cache_lock);
-
- start_caching(root);
-
- __btrfs_add_free_space(fs_info, pinned, objectid, 1, 0);
-
- up_write(&fs_info->commit_root_sem);
- }
-}
-
-/*
- * When a transaction is committed, we'll move those inode numbers which are
- * smaller than root->ino_cache_progress from pinned tree to free_ino tree, and
- * others will just be dropped, because the commit root we were searching has
- * changed.
- *
- * Must be called with root->fs_info->commit_root_sem held
- */
-void btrfs_unpin_free_ino(struct btrfs_root *root)
-{
- struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
- struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
- spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock;
- struct btrfs_free_space *info;
- struct rb_node *n;
- u64 count;
-
- if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
- return;
-
- while (1) {
- spin_lock(rbroot_lock);
- n = rb_first(rbroot);
- if (!n) {
- spin_unlock(rbroot_lock);
- break;
- }
-
- info = rb_entry(n, struct btrfs_free_space, offset_index);
- BUG_ON(info->bitmap); /* Logic error */
-
- if (info->offset > root->ino_cache_progress)
- count = 0;
- else
- count = min(root->ino_cache_progress - info->offset + 1,
- info->bytes);
-
- rb_erase(&info->offset_index, rbroot);
- spin_unlock(rbroot_lock);
- if (count)
- __btrfs_add_free_space(root->fs_info, ctl,
- info->offset, count, 0);
- kmem_cache_free(btrfs_free_space_cachep, info);
- }
-}
-
-#define INIT_THRESHOLD ((SZ_32K / 2) / sizeof(struct btrfs_free_space))
-#define INODES_PER_BITMAP (PAGE_SIZE * 8)
-
-/*
- * The goal is to keep the memory used by the free_ino tree from
- * exceeding what bitmaps alone would use.
- */
-static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
-{
- struct btrfs_free_space *info;
- struct rb_node *n;
- int max_ino;
- int max_bitmaps;
-
- n = rb_last(&ctl->free_space_offset);
- if (!n) {
- ctl->extents_thresh = INIT_THRESHOLD;
- return;
- }
- info = rb_entry(n, struct btrfs_free_space, offset_index);
-
- /*
- * Find the maximum inode number in the filesystem. Note we
- * ignore the fact that this can be a bitmap, because we are
- * not doing precise calculation.
- */
- max_ino = info->bytes - 1;
-
- max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
- if (max_bitmaps <= ctl->total_bitmaps) {
- ctl->extents_thresh = 0;
- return;
- }
-
- ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
- PAGE_SIZE / sizeof(*info);
-}
-
-/*
- * We don't fall back to bitmap, if we are below the extents threshold
- * or this chunk of inode numbers is a big one.
- */
-static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
- struct btrfs_free_space *info)
-{
- if (ctl->free_extents < ctl->extents_thresh ||
- info->bytes > INODES_PER_BITMAP / 10)
- return false;
-
- return true;
-}
-
-static const struct btrfs_free_space_op free_ino_op = {
- .recalc_thresholds = recalculate_thresholds,
- .use_bitmap = use_bitmap,
-};
-
-static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
-{
-}
-
-static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
- struct btrfs_free_space *info)
-{
- /*
- * We always use extents for two reasons:
- *
- * - The pinned tree is only used during the process of caching
- * work.
- * - Make code simpler. See btrfs_unpin_free_ino().
- */
- return false;
-}
-
-static const struct btrfs_free_space_op pinned_free_ino_op = {
- .recalc_thresholds = pinned_recalc_thresholds,
- .use_bitmap = pinned_use_bitmap,
-};
-
-void btrfs_init_free_ino_ctl(struct btrfs_root *root)
-{
- struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
- struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
-
- spin_lock_init(&ctl->tree_lock);
- ctl->unit = 1;
- ctl->start = 0;
- ctl->private = NULL;
- ctl->op = &free_ino_op;
- INIT_LIST_HEAD(&ctl->trimming_ranges);
- mutex_init(&ctl->cache_writeout_mutex);
-
- /*
- * Initially we allow 16K of RAM to cache chunks of
- * inode numbers before we resort to bitmaps. This is somewhat
- * arbitrary, but it will be adjusted at runtime.
- */
- ctl->extents_thresh = INIT_THRESHOLD;
-
- spin_lock_init(&pinned->tree_lock);
- pinned->unit = 1;
- pinned->start = 0;
- pinned->private = NULL;
- pinned->extents_thresh = 0;
- pinned->op = &pinned_free_ino_op;
-}
-
-int btrfs_save_ino_cache(struct btrfs_root *root,
- struct btrfs_trans_handle *trans)
-{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
- struct btrfs_path *path;
- struct inode *inode;
- struct btrfs_block_rsv *rsv;
- struct extent_changeset *data_reserved = NULL;
- u64 num_bytes;
- u64 alloc_hint = 0;
- int ret;
- int prealloc;
- bool retry = false;
-
- /* only fs tree and subvol/snap needs ino cache */
- if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID &&
- (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID ||
- root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID))
- return 0;
-
- /* Don't save inode cache if we are deleting this root */
- if (btrfs_root_refs(&root->root_item) == 0)
- return 0;
-
- if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
- return 0;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- rsv = trans->block_rsv;
- trans->block_rsv = &fs_info->trans_block_rsv;
-
- num_bytes = trans->bytes_reserved;
- /*
- * 1 item for inode item insertion if need
- * 4 items for inode item update (in the worst case)
- * 1 items for slack space if we need do truncation
- * 1 item for free space object
- * 3 items for pre-allocation
- */
- trans->bytes_reserved = btrfs_calc_insert_metadata_size(fs_info, 10);
- ret = btrfs_block_rsv_add(root, trans->block_rsv,
- trans->bytes_reserved,
- BTRFS_RESERVE_NO_FLUSH);
- if (ret)
- goto out;
- trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
- trans->bytes_reserved, 1);
-again:
- inode = lookup_free_ino_inode(root, path);
- if (IS_ERR(inode) && (PTR_ERR(inode) != -ENOENT || retry)) {
- ret = PTR_ERR(inode);
- goto out_release;
- }
-
- if (IS_ERR(inode)) {
- BUG_ON(retry); /* Logic error */
- retry = true;
-
- ret = create_free_ino_inode(root, trans, path);
- if (ret)
- goto out_release;
- goto again;
- }
-
- BTRFS_I(inode)->generation = 0;
- ret = btrfs_update_inode(trans, root, inode);
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out_put;
- }
-
- if (i_size_read(inode) > 0) {
- ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
- if (ret) {
- if (ret != -ENOSPC)
- btrfs_abort_transaction(trans, ret);
- goto out_put;
- }
- }
-
- spin_lock(&root->ino_cache_lock);
- if (root->ino_cache_state != BTRFS_CACHE_FINISHED) {
- ret = -1;
- spin_unlock(&root->ino_cache_lock);
- goto out_put;
- }
- spin_unlock(&root->ino_cache_lock);
-
- spin_lock(&ctl->tree_lock);
- prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
- prealloc = ALIGN(prealloc, PAGE_SIZE);
- prealloc += ctl->total_bitmaps * PAGE_SIZE;
- spin_unlock(&ctl->tree_lock);
-
- /* Just to make sure we have enough space */
- prealloc += 8 * PAGE_SIZE;
-
- ret = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved, 0,
- prealloc);
- if (ret)
- goto out_put;
-
- ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
- prealloc, prealloc, &alloc_hint);
- if (ret) {
- btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
- btrfs_delalloc_release_metadata(BTRFS_I(inode), prealloc, true);
- goto out_put;
- }
-
- ret = btrfs_write_out_ino_cache(root, trans, path, inode);
- btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
-out_put:
- iput(inode);
-out_release:
- trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
- trans->bytes_reserved, 0);
- btrfs_block_rsv_release(fs_info, trans->block_rsv,
- trans->bytes_reserved, NULL);
-out:
- trans->block_rsv = rsv;
- trans->bytes_reserved = num_bytes;
-
- btrfs_free_path(path);
- extent_changeset_free(data_reserved);
- return ret;
-}
-
-int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
-{
- struct btrfs_path *path;
- int ret;
- struct extent_buffer *l;
- struct btrfs_key search_key;
- struct btrfs_key found_key;
- int slot;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
- search_key.type = -1;
- search_key.offset = (u64)-1;
- ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
- if (ret < 0)
- goto error;
- BUG_ON(ret == 0); /* Corruption */
- if (path->slots[0] > 0) {
- slot = path->slots[0] - 1;
- l = path->nodes[0];
- btrfs_item_key_to_cpu(l, &found_key, slot);
- *objectid = max_t(u64, found_key.objectid,
- BTRFS_FIRST_FREE_OBJECTID - 1);
- } else {
- *objectid = BTRFS_FIRST_FREE_OBJECTID - 1;
- }
- ret = 0;
-error:
- btrfs_free_path(path);
- return ret;
-}
-
-int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
-{
- int ret;
- mutex_lock(&root->objectid_mutex);
-
- if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
- btrfs_warn(root->fs_info,
- "the objectid of root %llu reaches its highest value",
- root->root_key.objectid);
- ret = -ENOSPC;
- goto out;
- }
-
- *objectid = ++root->highest_objectid;
- ret = 0;
-out:
- mutex_unlock(&root->objectid_mutex);
- return ret;
-}
diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h
deleted file mode 100644
index 7a962811dffe..000000000000
--- a/fs/btrfs/inode-map.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef BTRFS_INODE_MAP_H
-#define BTRFS_INODE_MAP_H
-
-void btrfs_init_free_ino_ctl(struct btrfs_root *root);
-void btrfs_unpin_free_ino(struct btrfs_root *root);
-void btrfs_return_ino(struct btrfs_root *root, u64 objectid);
-int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid);
-int btrfs_save_ino_cache(struct btrfs_root *root,
- struct btrfs_trans_handle *trans);
-
-int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid);
-int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid);
-
-#endif
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 7e8d8169779d..8e23780acfae 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -45,7 +45,6 @@
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
-#include "inode-map.h"
#include "props.h"
#include "qgroup.h"
#include "delalloc-space.h"
@@ -62,7 +61,6 @@ struct btrfs_dio_data {
loff_t length;
ssize_t submitted;
struct extent_changeset *data_reserved;
- bool sync;
};
static const struct inode_operations btrfs_dir_inode_operations;
@@ -96,6 +94,51 @@ static void __endio_write_update_ordered(struct btrfs_inode *inode,
const bool uptodate);
/*
+ * btrfs_inode_lock - lock inode i_rwsem based on arguments passed
+ *
+ * ilock_flags can have the following bits set:
+ *
+ * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
+ * BTRFS_ILOCK_TRY - try to acquire the lock; if it fails on the first
+ * attempt, return -EAGAIN
+ */
+int btrfs_inode_lock(struct inode *inode, unsigned int ilock_flags)
+{
+ if (ilock_flags & BTRFS_ILOCK_SHARED) {
+ if (ilock_flags & BTRFS_ILOCK_TRY) {
+ if (!inode_trylock_shared(inode))
+ return -EAGAIN;
+ else
+ return 0;
+ }
+ inode_lock_shared(inode);
+ } else {
+ if (ilock_flags & BTRFS_ILOCK_TRY) {
+ if (!inode_trylock(inode))
+ return -EAGAIN;
+ else
+ return 0;
+ }
+ inode_lock(inode);
+ }
+ return 0;
+}
+
+/*
+ * btrfs_inode_unlock - unlock inode i_rwsem
+ *
+ * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
+ * to decide whether the lock acquired is shared or exclusive.
+ */
+void btrfs_inode_unlock(struct inode *inode, unsigned int ilock_flags)
+{
+ if (ilock_flags & BTRFS_ILOCK_SHARED)
+ inode_unlock_shared(inode);
+ else
+ inode_unlock(inode);
+}
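
The two helpers above collapse the shared/exclusive and try/blocking variants of taking the inode's i_rwsem into one flag-driven entry point. As a rough illustration of the same dispatch pattern, here is a minimal userspace sketch with a pthread rwlock standing in for i_rwsem (the flag names mirror the kernel's; everything else is illustrative, not kernel API):

#include <errno.h>
#include <pthread.h>

#define ILOCK_SHARED 0x1	/* mirrors BTRFS_ILOCK_SHARED */
#define ILOCK_TRY    0x2	/* mirrors BTRFS_ILOCK_TRY */

static pthread_rwlock_t ilock_rwsem = PTHREAD_RWLOCK_INITIALIZER;

/* Returns 0 on success, -EAGAIN if ILOCK_TRY is set and the lock is busy. */
static int ilock(unsigned int flags)
{
	if (flags & ILOCK_SHARED) {
		if (flags & ILOCK_TRY)
			return pthread_rwlock_tryrdlock(&ilock_rwsem) ? -EAGAIN : 0;
		return pthread_rwlock_rdlock(&ilock_rwsem);
	}
	if (flags & ILOCK_TRY)
		return pthread_rwlock_trywrlock(&ilock_rwsem) ? -EAGAIN : 0;
	return pthread_rwlock_wrlock(&ilock_rwsem);
}

static void iunlock(unsigned int flags)
{
	/* pthread rwlocks do not need the shared/exclusive hint on unlock */
	(void)flags;
	pthread_rwlock_unlock(&ilock_rwsem);
}
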
+
+/*
* Cleanup all submitted ordered extents in specified range to handle errors
* from the btrfs_run_delalloc_range() callback.
*
@@ -158,7 +201,7 @@ static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
* no overlapping inline items exist in the btree
*/
static int insert_inline_extent(struct btrfs_trans_handle *trans,
- struct btrfs_path *path, int extent_inserted,
+ struct btrfs_path *path, bool extent_inserted,
struct btrfs_root *root, struct inode *inode,
u64 start, size_t size, size_t compressed_size,
int compress_type,
@@ -179,8 +222,6 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
if (compressed_size && compressed_pages)
cur_size = compressed_size;
- inode_add_bytes(inode, size);
-
if (!extent_inserted) {
struct btrfs_key key;
size_t datasize;
@@ -190,7 +231,6 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
key.type = BTRFS_EXTENT_DATA_KEY;
datasize = btrfs_file_extent_calc_inline_size(cur_size);
- path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, root, path, &key,
datasize);
if (ret)
@@ -256,8 +296,6 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
* could end up racing with unlink.
*/
BTRFS_I(inode)->disk_i_size = inode->i_size;
- ret = btrfs_update_inode(trans, root, inode);
-
fail:
return ret;
}
@@ -273,6 +311,7 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 start,
int compress_type,
struct page **compressed_pages)
{
+ struct btrfs_drop_extents_args drop_args = { 0 };
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
@@ -283,8 +322,6 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 start,
u64 data_len = inline_len;
int ret;
struct btrfs_path *path;
- int extent_inserted = 0;
- u32 extent_item_size;
if (compressed_size)
data_len = compressed_size;
@@ -310,16 +347,20 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 start,
}
trans->block_rsv = &inode->block_rsv;
+ drop_args.path = path;
+ drop_args.start = start;
+ drop_args.end = aligned_end;
+ drop_args.drop_cache = true;
+ drop_args.replace_extent = true;
+
if (compressed_size && compressed_pages)
- extent_item_size = btrfs_file_extent_calc_inline_size(
+ drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(
compressed_size);
else
- extent_item_size = btrfs_file_extent_calc_inline_size(
+ drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(
inline_len);
- ret = __btrfs_drop_extents(trans, root, inode, path, start, aligned_end,
- NULL, 1, 1, extent_item_size,
- &extent_inserted);
+ ret = btrfs_drop_extents(trans, root, inode, &drop_args);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
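
This hunk is one instance of a series-wide conversion: __btrfs_drop_extents()'s long positional parameter list becomes a single btrfs_drop_extents_args struct, so inputs (replace_extent, extent_item_size, drop_cache) and outputs (extent_inserted, bytes_found) can grow without touching every caller. A hedged sketch of the caller-side pattern, limited to the fields visible in this diff:

#include <stdbool.h>
#include <stdint.h>

/* Fields visible in this diff; the kernel struct (in ctree.h) also carries
 * a path pointer and possibly more. */
struct drop_extents_args_sketch {
	/* inputs */
	uint64_t start;
	uint64_t end;
	bool drop_cache;
	bool replace_extent;
	uint32_t extent_item_size;
	/* outputs, filled in by the callee */
	bool extent_inserted;
	uint64_t bytes_found;
};

int main(void)
{
	/* zero-init keeps newly added fields "off" for existing callers */
	struct drop_extents_args_sketch args = { 0 };

	args.start = 0;
	args.end = 4096;
	args.drop_cache = true;
	/* the callee would set args.extent_inserted and args.bytes_found,
	 * replacing the old out-parameters of __btrfs_drop_extents() */
	return args.extent_inserted ? 1 : 0;
}
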
@@ -327,7 +368,7 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 start,
if (isize > actual_end)
inline_len = min_t(u64, isize, actual_end);
- ret = insert_inline_extent(trans, path, extent_inserted,
+ ret = insert_inline_extent(trans, path, drop_args.extent_inserted,
root, &inode->vfs_inode, start,
inline_len, compressed_size,
compress_type, compressed_pages);
@@ -339,8 +380,17 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 start,
goto out;
}
+ btrfs_update_inode_bytes(inode, inline_len, drop_args.bytes_found);
+ ret = btrfs_update_inode(trans, root, inode);
+ if (ret && ret != -ENOSPC) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ } else if (ret == -ENOSPC) {
+ ret = 1;
+ goto out;
+ }
+
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
- btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
out:
/*
* Don't forget to free the reserved space, as for inlined extent
@@ -1598,6 +1648,15 @@ next_slot:
goto out_check;
if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
goto out_check;
+
+ /*
+ * The following checks can be expensive, as they need to
+ * take other locks and do btree or rbtree searches, so
+ * release the path to avoid blocking other tasks for too
+ * long.
+ */
+ btrfs_release_path(path);
+
/* If extent is RO, we must COW it */
if (btrfs_extent_readonly(fs_info, disk_bytenr))
goto out_check;
@@ -1673,12 +1732,12 @@ out_check:
cur_offset = extent_end;
if (cur_offset > end)
break;
+ if (!path->nodes[0])
+ continue;
path->slots[0]++;
goto next_slot;
}
- btrfs_release_path(path);
-
/*
* COW range from cow_start to found_key.offset - 1. As the key
* will contain the beginning of the first extent that can be
@@ -2098,6 +2157,8 @@ void btrfs_clear_delalloc_extent(struct inode *vfs_inode,
spin_lock(&inode->lock);
ASSERT(inode->new_delalloc_bytes >= len);
inode->new_delalloc_bytes -= len;
+ if (*bits & EXTENT_ADD_INODE_BYTES)
+ inode_add_bytes(&inode->vfs_inode, len);
spin_unlock(&inode->lock);
}
}
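
The new EXTENT_ADD_INODE_BYTES bit makes the inode byte counter move under the same per-inode lock that already protects new_delalloc_bytes, which is what keeps a concurrent stat() from observing a torn value. Schematically, with a pthread mutex standing in for inode->lock and an illustrative flag value:

#include <pthread.h>

#define EXTENT_ADD_INODE_BYTES (1u << 0)	/* illustrative value only */

static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long new_delalloc_bytes;
static unsigned long long inode_bytes;

static void clear_delalloc_extent(unsigned int bits, unsigned long long len)
{
	pthread_mutex_lock(&inode_lock);
	new_delalloc_bytes -= len;
	if (bits & EXTENT_ADD_INODE_BYTES)
		inode_bytes += len;	/* both counters move together */
	pthread_mutex_unlock(&inode_lock);
}
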
@@ -2121,7 +2182,7 @@ int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
{
struct inode *inode = page->mapping->host;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- u64 logical = (u64)bio->bi_iter.bi_sector << 9;
+ u64 logical = bio->bi_iter.bi_sector << 9;
u64 length = 0;
u64 map_length;
int ret;
@@ -2150,11 +2211,9 @@ int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
 * At IO completion time the csums attached to the ordered extent record
* are inserted into the btree
*/
-static blk_status_t btrfs_submit_bio_start(void *private_data, struct bio *bio,
- u64 bio_offset)
+static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
+ u64 dio_file_offset)
{
- struct inode *inode = private_data;
-
return btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0);
}
@@ -2187,7 +2246,8 @@ blk_status_t btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
int skip_sum;
int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
- skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
+ skip_sum = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) ||
+ !fs_info->csum_root;
if (btrfs_is_free_space_inode(BTRFS_I(inode)))
metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
@@ -2202,8 +2262,13 @@ blk_status_t btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
mirror_num,
bio_flags);
goto out;
- } else if (!skip_sum) {
- ret = btrfs_lookup_bio_sums(inode, bio, (u64)-1, NULL);
+ } else {
+ /*
+ * btrfs_lookup_bio_sums() does its own checks around
+ * whether we need to csum or not, which is why we ignore
+ * skip_sum here.
+ */
+ ret = btrfs_lookup_bio_sums(inode, bio, NULL);
if (ret)
goto out;
}
@@ -2213,8 +2278,8 @@ blk_status_t btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
goto mapit;
/* we're doing a write, do the async checksumming */
- ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, bio_flags,
- 0, inode, btrfs_submit_bio_start);
+ ret = btrfs_wq_submit_bio(inode, bio, mirror_num, bio_flags,
+ 0, btrfs_submit_bio_start);
goto out;
} else if (!skip_sum) {
ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0);
@@ -2282,8 +2347,8 @@ static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
ret = set_extent_bit(&inode->io_tree, search_start,
search_start + em_len - 1,
- EXTENT_DELALLOC_NEW,
- NULL, cached_state, GFP_NOFS);
+ EXTENT_DELALLOC_NEW, 0, NULL, cached_state,
+ GFP_NOFS, NULL);
next:
search_start = extent_map_end(em);
free_extent_map(em);
@@ -2511,9 +2576,11 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, u64 file_pos,
struct btrfs_file_extent_item *stack_fi,
+ const bool update_inode_bytes,
u64 qgroup_reserved)
{
struct btrfs_root *root = inode->root;
+ const u64 sectorsize = root->fs_info->sectorsize;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_key ins;
@@ -2521,7 +2588,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
- int extent_inserted = 0;
+ struct btrfs_drop_extents_args drop_args = { 0 };
int ret;
path = btrfs_alloc_path();
@@ -2537,18 +2604,20 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
* the caller is expected to unpin it and allow it to be merged
* with the others.
*/
- ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
- file_pos + num_bytes, NULL, 0,
- 1, sizeof(*stack_fi), &extent_inserted);
+ drop_args.path = path;
+ drop_args.start = file_pos;
+ drop_args.end = file_pos + num_bytes;
+ drop_args.replace_extent = true;
+ drop_args.extent_item_size = sizeof(*stack_fi);
+ ret = btrfs_drop_extents(trans, root, inode, &drop_args);
if (ret)
goto out;
- if (!extent_inserted) {
+ if (!drop_args.extent_inserted) {
ins.objectid = btrfs_ino(inode);
ins.offset = file_pos;
ins.type = BTRFS_EXTENT_DATA_KEY;
- path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, root, path, &ins,
sizeof(*stack_fi));
if (ret)
@@ -2563,7 +2632,24 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
- inode_add_bytes(&inode->vfs_inode, num_bytes);
+ /*
+ * If we dropped an inline extent here, we know the range it covered
+ * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
+ * number of bytes only for the range containing the inline extent.
+ * The remainder of the range will be processed when clearing the
+ * EXTENT_DELALLOC_NEW bit through the ordered extent completion.
+ */
+ if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
+ u64 inline_size = round_down(drop_args.bytes_found, sectorsize);
+
+ inline_size = drop_args.bytes_found - inline_size;
+ btrfs_update_inode_bytes(inode, sectorsize, inline_size);
+ drop_args.bytes_found -= inline_size;
+ num_bytes -= sectorsize;
+ }
+
+ if (update_inode_bytes)
+ btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);
ins.objectid = disk_bytenr;
ins.offset = disk_num_bytes;
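
The rounding in the inline-extent branch above is easier to follow with concrete numbers. Assuming a 4096-byte sector size and a dropped inline extent of 904 bytes at offset 0 (values chosen purely for illustration), the inode gains one full sector of new data and loses the inline bytes right away, while the rest of the range is accounted at ordered extent completion:

#include <stdio.h>

int main(void)
{
	unsigned long long sectorsize = 4096;
	unsigned long long bytes_found = 904;	/* size of the dropped inline extent */
	unsigned long long rounded = (bytes_found / sectorsize) * sectorsize;	/* round_down */
	unsigned long long inline_size = bytes_found - rounded;	/* 904 */

	/* The first sector of new data replaces the inline extent, so the
	 * inode gains one full sector and loses the inline bytes now. */
	printf("inode bytes: +%llu -%llu\n", sectorsize, inline_size);
	return 0;
}
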
@@ -2601,6 +2687,7 @@ static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
{
struct btrfs_file_extent_item stack_fi;
u64 logical_len;
+ bool update_inode_bytes;
memset(&stack_fi, 0, sizeof(stack_fi));
btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
@@ -2616,9 +2703,18 @@ static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
/* Encryption and other encoding is reserved and all 0 */
+ /*
+ * For delalloc, when completing an ordered extent we update the inode's
+ * bytes when clearing the range in the inode's io tree, so we pass
+ * false for the 'update_inode_bytes' argument to
+ * insert_reserved_file_extent(), unless the ordered extent was truncated.
+ */
+ update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
+ test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);
+
return insert_reserved_file_extent(trans, BTRFS_I(oe->inode),
oe->file_offset, &stack_fi,
- oe->qgroup_rsv);
+ update_inode_bytes, oe->qgroup_rsv);
}
/*
@@ -2628,11 +2724,11 @@ static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
*/
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
{
- struct inode *inode = ordered_extent->inode;
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode);
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans = NULL;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ struct extent_io_tree *io_tree = &inode->io_tree;
struct extent_state *cached_state = NULL;
u64 start, end;
int compress_type = 0;
@@ -2640,10 +2736,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
u64 logical_len = ordered_extent->num_bytes;
bool freespace_inode;
bool truncated = false;
- bool range_locked = false;
- bool clear_new_delalloc_bytes = false;
bool clear_reserved_extent = true;
- unsigned int clear_bits;
+ unsigned int clear_bits = EXTENT_DEFRAG;
start = ordered_extent->file_offset;
end = start + ordered_extent->num_bytes - 1;
@@ -2651,16 +2745,16 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
!test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
!test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags))
- clear_new_delalloc_bytes = true;
+ clear_bits |= EXTENT_DELALLOC_NEW;
- freespace_inode = btrfs_is_free_space_inode(BTRFS_I(inode));
+ freespace_inode = btrfs_is_free_space_inode(inode);
if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
ret = -EIO;
goto out;
}
- btrfs_free_io_failure_record(BTRFS_I(inode), start, end);
+ btrfs_free_io_failure_record(inode, start, end);
if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
truncated = true;
@@ -2683,14 +2777,14 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
trans = NULL;
goto out;
}
- trans->block_rsv = &BTRFS_I(inode)->block_rsv;
+ trans->block_rsv = &inode->block_rsv;
ret = btrfs_update_inode_fallback(trans, root, inode);
if (ret) /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, ret);
goto out;
}
- range_locked = true;
+ clear_bits |= EXTENT_LOCKED;
lock_extent_bits(io_tree, start, end, &cached_state);
if (freespace_inode)
@@ -2703,13 +2797,13 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
goto out;
}
- trans->block_rsv = &BTRFS_I(inode)->block_rsv;
+ trans->block_rsv = &inode->block_rsv;
if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
compress_type = ordered_extent->compress_type;
if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
BUG_ON(compress_type);
- ret = btrfs_mark_extent_written(trans, BTRFS_I(inode),
+ ret = btrfs_mark_extent_written(trans, inode,
ordered_extent->file_offset,
ordered_extent->file_offset +
logical_len);
@@ -2723,8 +2817,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
ordered_extent->disk_num_bytes);
}
}
- unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
- ordered_extent->file_offset,
+ unpin_extent_cache(&inode->extent_tree, ordered_extent->file_offset,
ordered_extent->num_bytes, trans->transid);
if (ret < 0) {
btrfs_abort_transaction(trans, ret);
@@ -2737,6 +2830,17 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
goto out;
}
+ /*
+ * If this is a new delalloc range, clear its new delalloc flag to
+ * update the inode's number of bytes. This needs to be done before
+ * updating the inode item.
+ */
+ if ((clear_bits & EXTENT_DELALLOC_NEW) &&
+ !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
+ clear_extent_bit(&inode->io_tree, start, end,
+ EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
+ 0, 0, &cached_state);
+
btrfs_inode_safe_disk_i_size_write(inode, 0);
ret = btrfs_update_inode_fallback(trans, root, inode);
if (ret) { /* -ENOMEM or corruption */
@@ -2745,12 +2849,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
}
ret = 0;
out:
- clear_bits = EXTENT_DEFRAG;
- if (range_locked)
- clear_bits |= EXTENT_LOCKED;
- if (clear_new_delalloc_bytes)
- clear_bits |= EXTENT_DELALLOC_NEW;
- clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits,
+ clear_extent_bit(&inode->io_tree, start, end, clear_bits,
(clear_bits & EXTENT_LOCKED) ? 1 : 0, 0,
&cached_state);
@@ -2765,7 +2864,7 @@ out:
clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
/* Drop the cache for the part of the extent we didn't write. */
- btrfs_drop_extent_cache(BTRFS_I(inode), unwritten_start, end, 0);
+ btrfs_drop_extent_cache(inode, unwritten_start, end, 0);
/*
* If the ordered extent had an IOERR or something else went
@@ -2800,7 +2899,7 @@ out:
* This needs to be done to make sure anybody waiting knows we are done
* updating everything for this ordered extent.
*/
- btrfs_remove_ordered_extent(BTRFS_I(inode), ordered_extent);
+ btrfs_remove_ordered_extent(inode, ordered_extent);
/* once for us */
btrfs_put_ordered_extent(ordered_extent);
@@ -2841,18 +2940,32 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
btrfs_queue_work(wq, &ordered_extent->work);
}
+/*
+ * check_data_csum - verify checksum of one sector of uncompressed data
+ * @inode: inode
+ * @io_bio: btrfs_io_bio which contains the csum
+ * @bio_offset: offset of this sector from the beginning of the bio (in bytes)
+ * @page: page holding the data to be verified
+ * @pgoff: offset inside the page
+ *
+ * The length of such a check is always one sector.
+ */
static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio,
- int icsum, struct page *page, int pgoff, u64 start,
- size_t len)
+ u32 bio_offset, struct page *page, u32 pgoff)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
char *kaddr;
- u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
+ u32 len = fs_info->sectorsize;
+ const u32 csum_size = fs_info->csum_size;
+ unsigned int offset_sectors;
u8 *csum_expected;
u8 csum[BTRFS_CSUM_SIZE];
- csum_expected = ((u8 *)io_bio->csum) + icsum * csum_size;
+ ASSERT(pgoff + len <= PAGE_SIZE);
+
+ offset_sectors = bio_offset >> fs_info->sectorsize_bits;
+ csum_expected = ((u8 *)io_bio->csum) + offset_sectors * csum_size;
kaddr = kmap_atomic(page);
shash->tfm = fs_info->csum_shash;
@@ -2865,8 +2978,8 @@ static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio,
kunmap_atomic(kaddr);
return 0;
zeroit:
- btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
- io_bio->mirror_num);
+ btrfs_print_data_csum_error(BTRFS_I(inode), page_offset(page) + pgoff,
+ csum, csum_expected, io_bio->mirror_num);
if (io_bio->device)
btrfs_dev_stat_inc_and_print(io_bio->device,
BTRFS_DEV_STAT_CORRUPTION_ERRS);
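
check_data_csum() now derives the expected checksum's position from the sector index within the bio instead of a caller-maintained counter. Assuming 4 KiB sectors (sectorsize_bits = 12) and 4-byte crc32c checksums, a sector at bio_offset 8192 is the bio's third sector and its checksum sits at byte 8 of the csum array:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sectorsize_bits = 12;	/* assuming 4 KiB sectors */
	uint32_t csum_size = 4;		/* assuming crc32c checksums */
	uint32_t bio_offset = 8192;	/* start of the bio's third sector */
	uint32_t offset_sectors = bio_offset >> sectorsize_bits;

	/* csum_expected = io_bio->csum + offset_sectors * csum_size */
	printf("sector index %u -> csum byte offset %u\n",
	       offset_sectors, offset_sectors * csum_size);
	return 0;
}
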
@@ -2877,17 +2990,23 @@ zeroit:
}
/*
- * when reads are done, we need to check csums to verify the data is correct
+ * When reads are done, we need to check csums to verify the data is correct.
 * If there's a match, we allow the bio to finish. If not, the code in
 * extent_io.c will try to find good copies for us.
+ *
+ * @bio_offset: offset of the range from the beginning of the bio (in bytes)
+ * @start: file offset of the range start
+ * @end: file offset of the range end (inclusive)
+ * @mirror: mirror number
*/
-int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u64 phy_offset,
+int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u32 bio_offset,
struct page *page, u64 start, u64 end, int mirror)
{
- size_t offset = start - page_offset(page);
struct inode *inode = page->mapping->host;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_root *root = BTRFS_I(inode)->root;
+ const u32 sectorsize = root->fs_info->sectorsize;
+ u32 pg_off;
if (PageChecked(page)) {
ClearPageChecked(page);
@@ -2897,15 +3016,27 @@ int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u64 phy_offset,
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
return 0;
+ if (!root->fs_info->csum_root)
+ return 0;
+
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
return 0;
}
- phy_offset >>= inode->i_sb->s_blocksize_bits;
- return check_data_csum(inode, io_bio, phy_offset, page, offset, start,
- (size_t)(end - start + 1));
+ ASSERT(page_offset(page) <= start &&
+ end <= page_offset(page) + PAGE_SIZE - 1);
+ for (pg_off = offset_in_page(start);
+ pg_off < offset_in_page(end);
+ pg_off += sectorsize, bio_offset += sectorsize) {
+ int ret;
+
+ ret = check_data_csum(inode, io_bio, bio_offset, page, pg_off);
+ if (ret < 0)
+ return -EIO;
+ }
+ return 0;
}
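
Since a page can now hold several sectors, btrfs_verify_data_csum() walks the range in sector-sized steps instead of checking the page in one go. A standalone sketch of the loop bounds, assuming 64 KiB pages and 4 KiB sectors (the subpage configuration this series prepares for):

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 65536;		/* assuming 64 KiB pages */
	unsigned int sectorsize = 4096;		/* assuming 4 KiB sectors */
	unsigned long long start = 0;		/* file offset of range start */
	unsigned long long end = page_size - 1;	/* inclusive range end */
	unsigned int pg_off, bio_offset = 0, calls = 0;

	for (pg_off = start % page_size;
	     pg_off < end % page_size;
	     pg_off += sectorsize, bio_offset += sectorsize)
		calls++;	/* one check_data_csum() call per sector */

	/* prints: 16 sectors verified, final bio_offset 65536 */
	printf("%u sectors verified, final bio_offset %u\n", calls, bio_offset);
	return 0;
}
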
/*
@@ -3515,7 +3646,8 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
* copy everything in the in-memory inode into the btree.
*/
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode)
+ struct btrfs_root *root,
+ struct btrfs_inode *inode)
{
struct btrfs_inode_item *inode_item;
struct btrfs_path *path;
@@ -3526,9 +3658,7 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- path->leave_spinning = 1;
- ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
- 1);
+ ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1);
if (ret) {
if (ret > 0)
ret = -ENOENT;
@@ -3539,9 +3669,9 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
- fill_inode_item(trans, leaf, inode_item, inode);
+ fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
btrfs_mark_buffer_dirty(leaf);
- btrfs_set_inode_last_trans(trans, BTRFS_I(inode));
+ btrfs_set_inode_last_trans(trans, inode);
ret = 0;
failed:
btrfs_free_path(path);
@@ -3552,7 +3682,8 @@ failed:
* copy everything in the in-memory inode into the btree.
*/
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode)
+ struct btrfs_root *root,
+ struct btrfs_inode *inode)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
@@ -3564,23 +3695,22 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
* The data relocation inode should also be directly updated
* without delay
*/
- if (!btrfs_is_free_space_inode(BTRFS_I(inode))
+ if (!btrfs_is_free_space_inode(inode)
&& root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
&& !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
btrfs_update_root_times(trans, root);
ret = btrfs_delayed_update_inode(trans, root, inode);
if (!ret)
- btrfs_set_inode_last_trans(trans, BTRFS_I(inode));
+ btrfs_set_inode_last_trans(trans, inode);
return ret;
}
return btrfs_update_inode_item(trans, root, inode);
}
-noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct inode *inode)
+int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_inode *inode)
{
int ret;
@@ -3615,7 +3745,6 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
goto out;
}
- path->leave_spinning = 1;
di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
name, name_len, -1);
if (IS_ERR_OR_NULL(di)) {
@@ -3695,7 +3824,7 @@ err:
inode_inc_iversion(&dir->vfs_inode);
inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime =
dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
- ret = btrfs_update_inode(trans, root, &dir->vfs_inode);
+ ret = btrfs_update_inode(trans, root, dir);
out:
return ret;
}
@@ -3709,7 +3838,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
if (!ret) {
drop_nlink(&inode->vfs_inode);
- ret = btrfs_update_inode(trans, root, &inode->vfs_inode);
+ ret = btrfs_update_inode(trans, root, inode);
}
return ret;
}
@@ -3858,7 +3987,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
inode_inc_iversion(dir);
dir->i_mtime = dir->i_ctime = current_time(dir);
- ret = btrfs_update_inode_fallback(trans, root, dir);
+ ret = btrfs_update_inode_fallback(trans, root, BTRFS_I(dir));
if (ret)
btrfs_abort_transaction(trans, ret);
out:
@@ -3995,7 +4124,6 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
struct btrfs_block_rsv block_rsv;
u64 root_flags;
int ret;
- int err;
/*
* Don't allow to delete a subvolume with send in progress. This is
@@ -4017,8 +4145,8 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
down_write(&fs_info->subvol_sem);
- err = may_destroy_subvol(dest);
- if (err)
+ ret = may_destroy_subvol(dest);
+ if (ret)
goto out_up_write;
btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
@@ -4027,13 +4155,13 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
* two for dir entries,
* two for root ref/backref.
*/
- err = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
- if (err)
+ ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
+ if (ret)
goto out_up_write;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ ret = PTR_ERR(trans);
goto out_release;
}
trans->block_rsv = &block_rsv;
@@ -4043,7 +4171,6 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
ret = btrfs_unlink_subvol(trans, dir, dentry);
if (ret) {
- err = ret;
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
@@ -4052,7 +4179,7 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
memset(&dest->root_item.drop_progress, 0,
sizeof(dest->root_item.drop_progress));
- dest->root_item.drop_level = 0;
+ btrfs_set_root_drop_level(&dest->root_item, 0);
btrfs_set_root_refs(&dest->root_item, 0);
if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
@@ -4061,7 +4188,6 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
dest->root_key.objectid);
if (ret) {
btrfs_abort_transaction(trans, ret);
- err = ret;
goto out_end_trans;
}
}
@@ -4071,7 +4197,6 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
dest->root_key.objectid);
if (ret && ret != -ENOENT) {
btrfs_abort_transaction(trans, ret);
- err = ret;
goto out_end_trans;
}
if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
@@ -4081,7 +4206,6 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
dest->root_key.objectid);
if (ret && ret != -ENOENT) {
btrfs_abort_transaction(trans, ret);
- err = ret;
goto out_end_trans;
}
}
@@ -4092,14 +4216,12 @@ out_end_trans:
trans->block_rsv = NULL;
trans->bytes_reserved = 0;
ret = btrfs_end_transaction(trans);
- if (ret && !err)
- err = ret;
inode->i_flags |= S_DEAD;
out_release:
btrfs_subvolume_release_metadata(root, &block_rsv);
out_up_write:
up_write(&fs_info->subvol_sem);
- if (err) {
+ if (ret) {
spin_lock(&dest->root_item_lock);
root_flags = btrfs_root_flags(&dest->root_item);
btrfs_set_root_flags(&dest->root_item,
@@ -4109,15 +4231,9 @@ out_up_write:
d_invalidate(dentry);
btrfs_prune_dentries(dest);
ASSERT(dest->send_in_progress == 0);
-
- /* the last ref */
- if (dest->ino_cache_inode) {
- iput(dest->ino_cache_inode);
- dest->ino_cache_inode = NULL;
- }
}
- return err;
+ return ret;
}
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
@@ -4194,7 +4310,7 @@ out:
*/
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
- struct inode *inode,
+ struct btrfs_inode *inode,
u64 new_size, u32 min_type)
{
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -4215,7 +4331,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
int pending_del_slot = 0;
int extent_type = -1;
int ret;
- u64 ino = btrfs_ino(BTRFS_I(inode));
+ u64 ino = btrfs_ino(inode);
u64 bytes_deleted = 0;
bool be_nice = false;
bool should_throttle = false;
@@ -4229,7 +4345,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
* off from time to time. This means all inodes in subvolume roots,
* reloc roots, and data reloc roots.
*/
- if (!btrfs_is_free_space_inode(BTRFS_I(inode)) &&
+ if (!btrfs_is_free_space_inode(inode) &&
test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
be_nice = true;
@@ -4239,7 +4355,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
path->reada = READA_BACK;
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
- lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1,
+ lock_extent_bits(&inode->io_tree, lock_start, (u64)-1,
&cached_state);
/*
@@ -4247,7 +4363,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
* new size is not block aligned since we will be keeping the
* last block of the extent just the way it is.
*/
- btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size,
+ btrfs_drop_extent_cache(inode, ALIGN(new_size,
fs_info->sectorsize),
(u64)-1, 0);
}
@@ -4258,8 +4374,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
* it is used to drop the logged items. So we shouldn't kill the delayed
* items.
*/
- if (min_type == 0 && root == BTRFS_I(inode)->root)
- btrfs_kill_delayed_inode_items(BTRFS_I(inode));
+ if (min_type == 0 && root == inode->root)
+ btrfs_kill_delayed_inode_items(inode);
key.objectid = ino;
key.offset = (u64)-1;
@@ -4315,14 +4431,13 @@ search_again:
btrfs_file_extent_num_bytes(leaf, fi);
trace_btrfs_truncate_show_fi_regular(
- BTRFS_I(inode), leaf, fi,
- found_key.offset);
+ inode, leaf, fi, found_key.offset);
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
item_end += btrfs_file_extent_ram_bytes(leaf,
fi);
trace_btrfs_truncate_show_fi_inline(
- BTRFS_I(inode), leaf, fi, path->slots[0],
+ inode, leaf, fi, path->slots[0],
found_key.offset);
}
item_end--;
@@ -4361,7 +4476,8 @@ search_again:
if (test_bit(BTRFS_ROOT_SHAREABLE,
&root->state) &&
extent_start != 0)
- inode_sub_bytes(inode, num_dec);
+ inode_sub_bytes(&inode->vfs_inode,
+ num_dec);
btrfs_mark_buffer_dirty(leaf);
} else {
extent_num_bytes =
@@ -4376,7 +4492,8 @@ search_again:
found_extent = 1;
if (test_bit(BTRFS_ROOT_SHAREABLE,
&root->state))
- inode_sub_bytes(inode, num_dec);
+ inode_sub_bytes(&inode->vfs_inode,
+ num_dec);
}
}
clear_len = num_dec;
@@ -4411,7 +4528,8 @@ search_again:
}
if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
- inode_sub_bytes(inode, item_end + 1 - new_size);
+ inode_sub_bytes(&inode->vfs_inode,
+ item_end + 1 - new_size);
}
delete:
/*
@@ -4419,8 +4537,8 @@ delete:
* multiple fsyncs, and in this case we don't want to clear the
* file extent range because it's just the log.
*/
- if (root == BTRFS_I(inode)->root) {
- ret = btrfs_inode_clear_file_extent_range(BTRFS_I(inode),
+ if (root == inode->root) {
+ ret = btrfs_inode_clear_file_extent_range(inode,
clear_start, clear_len);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -4529,8 +4647,8 @@ out:
if (!ret && last_size > new_size)
last_size = new_size;
btrfs_inode_safe_disk_i_size_write(inode, last_size);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start,
- (u64)-1, &cached_state);
+ unlock_extent_cached(&inode->io_tree, lock_start, (u64)-1,
+ &cached_state);
}
btrfs_free_path(path);
@@ -4548,12 +4666,12 @@ out:
* This will find the block for the "from" offset and cow the block and zero the
* part we want to zero. This is used with truncate and hole punching.
*/
-int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
- int front)
+int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
+ int front)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct address_space *mapping = inode->i_mapping;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct address_space *mapping = inode->vfs_inode.i_mapping;
+ struct extent_io_tree *io_tree = &inode->io_tree;
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
struct extent_changeset *data_reserved = NULL;
@@ -4576,30 +4694,29 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
block_start = round_down(from, blocksize);
block_end = block_start + blocksize - 1;
- ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved,
- block_start, blocksize);
+ ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
+ blocksize);
if (ret < 0) {
- if (btrfs_check_nocow_lock(BTRFS_I(inode), block_start,
- &write_bytes) > 0) {
+ if (btrfs_check_nocow_lock(inode, block_start, &write_bytes) > 0) {
/* For nocow case, no need to reserve data space */
only_release_metadata = true;
} else {
goto out;
}
}
- ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), blocksize);
+ ret = btrfs_delalloc_reserve_metadata(inode, blocksize);
if (ret < 0) {
if (!only_release_metadata)
- btrfs_free_reserved_data_space(BTRFS_I(inode),
- data_reserved, block_start, blocksize);
+ btrfs_free_reserved_data_space(inode, data_reserved,
+ block_start, blocksize);
goto out;
}
again:
page = find_or_create_page(mapping, index, mask);
if (!page) {
- btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved,
- block_start, blocksize, true);
- btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
+ btrfs_delalloc_release_space(inode, data_reserved, block_start,
+ blocksize, true);
+ btrfs_delalloc_release_extents(inode, blocksize);
ret = -ENOMEM;
goto out;
}
@@ -4622,7 +4739,7 @@ again:
lock_extent_bits(io_tree, block_start, block_end, &cached_state);
set_page_extent_mapped(page);
- ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), block_start);
+ ordered = btrfs_lookup_ordered_extent(inode, block_start);
if (ordered) {
unlock_extent_cached(io_tree, block_start, block_end,
&cached_state);
@@ -4633,11 +4750,11 @@ again:
goto again;
}
- clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
+ clear_extent_bit(&inode->io_tree, block_start, block_end,
EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
0, 0, &cached_state);
- ret = btrfs_set_extent_delalloc(BTRFS_I(inode), block_start, block_end, 0,
+ ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
&cached_state);
if (ret) {
unlock_extent_cached(io_tree, block_start, block_end,
@@ -4663,34 +4780,33 @@ again:
unlock_extent_cached(io_tree, block_start, block_end, &cached_state);
if (only_release_metadata)
- set_extent_bit(&BTRFS_I(inode)->io_tree, block_start,
- block_end, EXTENT_NORESERVE, NULL, NULL,
- GFP_NOFS);
+ set_extent_bit(&inode->io_tree, block_start, block_end,
+ EXTENT_NORESERVE, 0, NULL, NULL, GFP_NOFS, NULL);
out_unlock:
if (ret) {
if (only_release_metadata)
- btrfs_delalloc_release_metadata(BTRFS_I(inode),
- blocksize, true);
+ btrfs_delalloc_release_metadata(inode, blocksize, true);
else
- btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved,
+ btrfs_delalloc_release_space(inode, data_reserved,
block_start, blocksize, true);
}
- btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
+ btrfs_delalloc_release_extents(inode, blocksize);
unlock_page(page);
put_page(page);
out:
if (only_release_metadata)
- btrfs_check_nocow_unlock(BTRFS_I(inode));
+ btrfs_check_nocow_unlock(inode);
extent_changeset_free(data_reserved);
return ret;
}
-static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
+static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode,
u64 offset, u64 len)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
+ struct btrfs_drop_extents_args drop_args = { 0 };
int ret;
/*
@@ -4698,9 +4814,9 @@ static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
* that any holes get logged if we fsync.
*/
if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
- BTRFS_I(inode)->last_trans = fs_info->generation;
- BTRFS_I(inode)->last_sub_trans = root->log_transid;
- BTRFS_I(inode)->last_log_commit = root->last_log_commit;
+ inode->last_trans = fs_info->generation;
+ inode->last_sub_trans = root->log_transid;
+ inode->last_log_commit = root->last_log_commit;
return 0;
}
@@ -4713,19 +4829,25 @@ static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
+ drop_args.start = offset;
+ drop_args.end = offset + len;
+ drop_args.drop_cache = true;
+
+ ret = btrfs_drop_extents(trans, root, inode, &drop_args);
if (ret) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
return ret;
}
- ret = btrfs_insert_file_extent(trans, root, btrfs_ino(BTRFS_I(inode)),
+ ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
offset, 0, 0, len, 0, len, 0, 0, 0);
- if (ret)
+ if (ret) {
btrfs_abort_transaction(trans, ret);
- else
+ } else {
+ btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
btrfs_update_inode(trans, root, inode);
+ }
btrfs_end_transaction(trans);
return ret;
}
@@ -4736,14 +4858,14 @@ static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
* these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
* the range between oldsize and size
*/
-int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
+int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct extent_io_tree *io_tree = &inode->io_tree;
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
- struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+ struct extent_map_tree *em_tree = &inode->extent_tree;
u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
u64 block_end = ALIGN(size, fs_info->sectorsize);
u64 last_byte;
@@ -4763,11 +4885,11 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
if (size <= hole_start)
return 0;
- btrfs_lock_and_flush_ordered_range(BTRFS_I(inode), hole_start,
- block_end - 1, &cached_state);
+ btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
+ &cached_state);
cur_offset = hole_start;
while (1) {
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
+ em = btrfs_get_extent(inode, NULL, 0, cur_offset,
block_end - cur_offset);
if (IS_ERR(em)) {
err = PTR_ERR(em);
@@ -4786,17 +4908,17 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
if (err)
break;
- err = btrfs_inode_set_file_extent_range(BTRFS_I(inode),
+ err = btrfs_inode_set_file_extent_range(inode,
cur_offset, hole_size);
if (err)
break;
- btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
+ btrfs_drop_extent_cache(inode, cur_offset,
cur_offset + hole_size - 1, 0);
hole_em = alloc_extent_map();
if (!hole_em) {
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &BTRFS_I(inode)->runtime_flags);
+ &inode->runtime_flags);
goto next;
}
hole_em->start = cur_offset;
@@ -4816,14 +4938,13 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
write_unlock(&em_tree->lock);
if (err != -EEXIST)
break;
- btrfs_drop_extent_cache(BTRFS_I(inode),
- cur_offset,
+ btrfs_drop_extent_cache(inode, cur_offset,
cur_offset +
hole_size - 1, 0);
}
free_extent_map(hole_em);
} else {
- err = btrfs_inode_set_file_extent_range(BTRFS_I(inode),
+ err = btrfs_inode_set_file_extent_range(inode,
cur_offset, hole_size);
if (err)
break;
@@ -4871,7 +4992,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
* this truncation.
*/
btrfs_drew_write_lock(&root->snapshot_lock);
- ret = btrfs_cont_expand(inode, oldsize, newsize);
+ ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
if (ret) {
btrfs_drew_write_unlock(&root->snapshot_lock);
return ret;
@@ -4884,9 +5005,9 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
}
i_size_write(inode, newsize);
- btrfs_inode_safe_disk_i_size_write(inode, 0);
+ btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
pagecache_isize_extended(inode, oldsize, newsize);
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
btrfs_drew_write_unlock(&root->snapshot_lock);
btrfs_end_transaction(trans);
} else {
@@ -5157,7 +5278,8 @@ void btrfs_evict_inode(struct inode *inode)
trans->block_rsv = rsv;
- ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
+ ret = btrfs_truncate_inode_items(trans, root, BTRFS_I(inode),
+ 0, 0);
trans->block_rsv = &fs_info->trans_block_rsv;
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
@@ -5184,10 +5306,6 @@ void btrfs_evict_inode(struct inode *inode)
btrfs_end_transaction(trans);
}
- if (!(root == fs_info->tree_root ||
- root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
- btrfs_return_ino(root, btrfs_ino(BTRFS_I(inode)));
-
free_rsv:
btrfs_free_block_rsv(fs_info, rsv);
no_delete:
@@ -5797,7 +5915,7 @@ static int btrfs_dirty_inode(struct inode *inode)
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
if (ret && ret == -ENOSPC) {
/* whoops, lets try again with the full transaction */
btrfs_end_transaction(trans);
@@ -5805,7 +5923,7 @@ static int btrfs_dirty_inode(struct inode *inode)
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
}
btrfs_end_transaction(trans);
if (BTRFS_I(inode)->delayed_node)
@@ -6068,7 +6186,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
goto fail;
}
- path->leave_spinning = 1;
ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
if (ret != 0)
goto fail_unlock;
@@ -6194,7 +6311,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
parent_inode->vfs_inode.i_mtime = now;
parent_inode->vfs_inode.i_ctime = now;
}
- ret = btrfs_update_inode(trans, root, &parent_inode->vfs_inode);
+ ret = btrfs_update_inode(trans, root, parent_inode);
if (ret)
btrfs_abort_transaction(trans, ret);
return ret;
@@ -6254,7 +6371,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
if (IS_ERR(trans))
return PTR_ERR(trans);
- err = btrfs_find_free_ino(root, &objectid);
+ err = btrfs_find_free_objectid(root, &objectid);
if (err)
goto out_unlock;
@@ -6285,7 +6402,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
if (err)
goto out_unlock;
- btrfs_update_inode(trans, root, inode);
+ btrfs_update_inode(trans, root, BTRFS_I(inode));
d_instantiate_new(dentry, inode);
out_unlock:
@@ -6318,7 +6435,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
if (IS_ERR(trans))
return PTR_ERR(trans);
- err = btrfs_find_free_ino(root, &objectid);
+ err = btrfs_find_free_objectid(root, &objectid);
if (err)
goto out_unlock;
@@ -6344,7 +6461,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
if (err)
goto out_unlock;
- err = btrfs_update_inode(trans, root, inode);
+ err = btrfs_update_inode(trans, root, BTRFS_I(inode));
if (err)
goto out_unlock;
@@ -6416,7 +6533,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
} else {
struct dentry *parent = dentry->d_parent;
- err = btrfs_update_inode(trans, root, inode);
+ err = btrfs_update_inode(trans, root, BTRFS_I(inode));
if (err)
goto fail;
if (inode->i_nlink == 1) {
@@ -6462,7 +6579,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
if (IS_ERR(trans))
return PTR_ERR(trans);
- err = btrfs_find_free_ino(root, &objectid);
+ err = btrfs_find_free_objectid(root, &objectid);
if (err)
goto out_fail;
@@ -6484,7 +6601,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
goto out_fail;
btrfs_i_size_write(BTRFS_I(inode), 0);
- err = btrfs_update_inode(trans, root, inode);
+ err = btrfs_update_inode(trans, root, BTRFS_I(inode));
if (err)
goto out_fail;
@@ -6621,12 +6738,14 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
path->reada = READA_FORWARD;
/*
- * Unless we're going to uncompress the inline extent, no sleep would
- * happen.
+ * The same explanation in load_free_space_cache applies here as well:
+ * we only read when we're loading the free space cache, and at that
+ * point the commit_root has everything we need.
*/
- path->leave_spinning = 1;
-
- path->recurse = btrfs_is_free_space_inode(inode);
+ if (btrfs_is_free_space_inode(inode)) {
+ path->search_commit_root = 1;
+ path->skip_locking = 1;
+ }
ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
if (ret < 0) {
@@ -6728,7 +6847,6 @@ next:
em->orig_start = em->start;
ptr = btrfs_file_extent_inline_start(item) + extent_offset;
- btrfs_set_path_blocking(path);
if (!PageUptodate(page)) {
if (btrfs_file_extent_compression(leaf, item) !=
BTRFS_COMPRESS_NONE) {
@@ -7377,17 +7495,6 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
int ret = 0;
u64 len = length;
bool unlock_extents = false;
- bool sync = (current->journal_info == BTRFS_DIO_SYNC_STUB);
-
- /*
- * We used current->journal_info here to see if we were sync, but
- * there's a lot of tests in the enospc machinery to not do flushing if
- * we have a journal_info set, so we need to clear this out and re-set
- * it in iomap_end.
- */
- ASSERT(current->journal_info == NULL ||
- current->journal_info == BTRFS_DIO_SYNC_STUB);
- current->journal_info = NULL;
if (!write)
len = min_t(u64, len, fs_info->sectorsize);
@@ -7413,7 +7520,6 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
if (!dio_data)
return -ENOMEM;
- dio_data->sync = sync;
dio_data->length = length;
if (write) {
dio_data->reserve = round_up(length, fs_info->sectorsize);
@@ -7561,14 +7667,6 @@ static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
extent_changeset_free(dio_data->data_reserved);
}
out:
- /*
- * We're all done, we can re-set the current->journal_info now safely
- * for our endio.
- */
- if (dio_data->sync) {
- ASSERT(current->journal_info == NULL);
- current->journal_info = BTRFS_DIO_SYNC_STUB;
- }
kfree(dio_data);
iomap->private = NULL;
@@ -7632,7 +7730,7 @@ static blk_status_t btrfs_check_read_dio_bio(struct inode *inode,
struct bio_vec bvec;
struct bvec_iter iter;
u64 start = io_bio->logical;
- int icsum = 0;
+ u32 bio_offset = 0;
blk_status_t err = BLK_STS_OK;
__bio_for_each_segment(bvec, &io_bio->bio, iter, io_bio->iter) {
@@ -7643,9 +7741,8 @@ static blk_status_t btrfs_check_read_dio_bio(struct inode *inode,
for (i = 0; i < nr_sectors; i++) {
ASSERT(pgoff < PAGE_SIZE);
if (uptodate &&
- (!csum || !check_data_csum(inode, io_bio, icsum,
- bvec.bv_page, pgoff,
- start, sectorsize))) {
+ (!csum || !check_data_csum(inode, io_bio,
+ bio_offset, bvec.bv_page, pgoff))) {
clean_io_failure(fs_info, failure_tree, io_tree,
start, bvec.bv_page,
btrfs_ino(BTRFS_I(inode)),
@@ -7653,6 +7750,7 @@ static blk_status_t btrfs_check_read_dio_bio(struct inode *inode,
} else {
blk_status_t status;
+ ASSERT((start - io_bio->logical) < UINT_MAX);
status = btrfs_submit_read_repair(inode,
&io_bio->bio,
start - io_bio->logical,
@@ -7665,7 +7763,8 @@ static blk_status_t btrfs_check_read_dio_bio(struct inode *inode,
err = status;
}
start += sectorsize;
- icsum++;
+ ASSERT(bio_offset + sectorsize > bio_offset);
+ bio_offset += sectorsize;
pgoff += sectorsize;
}
}
@@ -7715,12 +7814,11 @@ static void __endio_write_update_ordered(struct btrfs_inode *inode,
}
}
-static blk_status_t btrfs_submit_bio_start_direct_io(void *private_data,
- struct bio *bio, u64 offset)
+static blk_status_t btrfs_submit_bio_start_direct_io(struct inode *inode,
+ struct bio *bio,
+ u64 dio_file_offset)
{
- struct inode *inode = private_data;
-
- return btrfs_csum_one_bio(BTRFS_I(inode), bio, offset, 1);
+ return btrfs_csum_one_bio(BTRFS_I(inode), bio, dio_file_offset, 1);
}
static void btrfs_end_dio_bio(struct bio *bio)
@@ -7732,8 +7830,7 @@ static void btrfs_end_dio_bio(struct bio *bio)
btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
"direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio),
- bio->bi_opf,
- (unsigned long long)bio->bi_iter.bi_sector,
+ bio->bi_opf, bio->bi_iter.bi_sector,
bio->bi_iter.bi_size, err);
if (bio_op(bio) == REQ_OP_READ) {
@@ -7770,8 +7867,7 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
goto map;
if (write && async_submit) {
- ret = btrfs_wq_submit_bio(fs_info, bio, 0, 0,
- file_offset, inode,
+ ret = btrfs_wq_submit_bio(inode, bio, 0, 0, file_offset,
btrfs_submit_bio_start_direct_io);
goto err;
} else if (write) {
@@ -7786,8 +7882,8 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
u64 csum_offset;
csum_offset = file_offset - dip->logical_offset;
- csum_offset >>= inode->i_sb->s_blocksize_bits;
- csum_offset *= btrfs_super_csum_size(fs_info->super_copy);
+ csum_offset >>= fs_info->sectorsize_bits;
+ csum_offset *= fs_info->csum_size;
btrfs_io_bio(bio)->csum = dip->csums + csum_offset;
}
map:
@@ -7812,11 +7908,10 @@ static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio,
dip_size = sizeof(*dip);
if (!write && csum) {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
size_t nblocks;
- nblocks = dio_bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
- dip_size += csum_size * nblocks;
+ nblocks = dio_bio->bi_iter.bi_size >> fs_info->sectorsize_bits;
+ dip_size += fs_info->csum_size * nblocks;
}
dip = kzalloc(dip_size, GFP_NOFS);
@@ -7826,7 +7921,7 @@ static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio,
dip->inode = inode;
dip->logical_offset = file_offset;
dip->bytes = dio_bio->bi_iter.bi_size;
- dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
+ dip->disk_bytenr = dio_bio->bi_iter.bi_sector << 9;
dip->dio_bio = dio_bio;
refcount_set(&dip->refs, 1);
return dip;
@@ -7836,7 +7931,6 @@ static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
struct bio *dio_bio, loff_t file_offset)
{
const bool write = (bio_op(dio_bio) == REQ_OP_WRITE);
- const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
const bool raid56 = (btrfs_data_alloc_profile(fs_info) &
BTRFS_BLOCK_GROUP_RAID56_MASK);
@@ -7863,13 +7957,14 @@ static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
return BLK_QC_T_NONE;
}
- if (!write && csum) {
+ if (!write) {
/*
* Load the csums up front to reduce csum tree searches and
* contention when submitting bios.
+ *
+ * If we have csums disabled this will do nothing.
*/
- status = btrfs_lookup_bio_sums(inode, dio_bio, file_offset,
- dip->csums);
+ status = btrfs_lookup_bio_sums(inode, dio_bio, dip->csums);
if (status != BLK_STS_OK)
goto out_err;
}
@@ -7944,129 +8039,15 @@ out_err:
return BLK_QC_T_NONE;
}
-static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
- const struct iov_iter *iter, loff_t offset)
-{
- int seg;
- int i;
- unsigned int blocksize_mask = fs_info->sectorsize - 1;
- ssize_t retval = -EINVAL;
-
- if (offset & blocksize_mask)
- goto out;
-
- if (iov_iter_alignment(iter) & blocksize_mask)
- goto out;
-
- /* If this is a write we don't need to check anymore */
- if (iov_iter_rw(iter) != READ || !iter_is_iovec(iter))
- return 0;
- /*
- * Check to make sure we don't have duplicate iov_base's in this
- * iovec, if so return EINVAL, otherwise we'll get csum errors
- * when reading back.
- */
- for (seg = 0; seg < iter->nr_segs; seg++) {
- for (i = seg + 1; i < iter->nr_segs; i++) {
- if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
- goto out;
- }
- }
- retval = 0;
-out:
- return retval;
-}
-
-static inline int btrfs_maybe_fsync_end_io(struct kiocb *iocb, ssize_t size,
- int error, unsigned flags)
-{
- /*
- * Now if we're still in the context of our submitter we know we can't
- * safely run generic_write_sync(), so clear our flag here so that the
- * caller knows to follow up with a sync.
- */
- if (current->journal_info == BTRFS_DIO_SYNC_STUB) {
- current->journal_info = NULL;
- return error;
- }
-
- if (error)
- return error;
-
- if (size) {
- iocb->ki_flags |= IOCB_DSYNC;
- return generic_write_sync(iocb, size);
- }
-
- return 0;
-}
-
-static const struct iomap_ops btrfs_dio_iomap_ops = {
+const struct iomap_ops btrfs_dio_iomap_ops = {
.iomap_begin = btrfs_dio_iomap_begin,
.iomap_end = btrfs_dio_iomap_end,
};
-static const struct iomap_dio_ops btrfs_dio_ops = {
+const struct iomap_dio_ops btrfs_dio_ops = {
.submit_io = btrfs_submit_direct,
};
-static const struct iomap_dio_ops btrfs_sync_dops = {
- .submit_io = btrfs_submit_direct,
- .end_io = btrfs_maybe_fsync_end_io,
-};
-
-ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
-{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct extent_changeset *data_reserved = NULL;
- loff_t offset = iocb->ki_pos;
- size_t count = 0;
- bool relock = false;
- ssize_t ret;
-
- if (check_direct_IO(fs_info, iter, offset))
- return 0;
-
- count = iov_iter_count(iter);
- if (iov_iter_rw(iter) == WRITE) {
- /*
- * If the write DIO is beyond the EOF, we need update
- * the isize, but it is protected by i_mutex. So we can
- * not unlock the i_mutex at this case.
- */
- if (offset + count <= inode->i_size) {
- inode_unlock(inode);
- relock = true;
- }
- down_read(&BTRFS_I(inode)->dio_sem);
- }
-
- /*
- * We have are actually a sync iocb, so we need our fancy endio to know
- * if we need to sync.
- */
- if (current->journal_info)
- ret = iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops,
- &btrfs_sync_dops, is_sync_kiocb(iocb));
- else
- ret = iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops,
- &btrfs_dio_ops, is_sync_kiocb(iocb));
-
- if (ret == -ENOTBLK)
- ret = 0;
-
- if (iov_iter_rw(iter) == WRITE)
- up_read(&BTRFS_I(inode)->dio_sem);
-
- if (relock)
- inode_lock(inode);
-
- extent_changeset_free(data_reserved);
- return ret;
-}
-
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
@@ -8186,6 +8167,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
u64 start;
u64 end;
int inode_evicting = inode->vfs_inode.i_state & I_FREEING;
+ bool found_ordered = false;
+ bool completed_ordered = false;
/*
* we have the page locked, so new writeback can't start,
@@ -8207,15 +8190,17 @@ again:
start = page_start;
ordered = btrfs_lookup_ordered_range(inode, start, page_end - start + 1);
if (ordered) {
+ found_ordered = true;
end = min(page_end,
ordered->file_offset + ordered->num_bytes - 1);
/*
- * IO on this page will never be started, so we need
- * to account for any ordered extents now
+ * IO on this page will never be started, so we need to account
+ * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
+ * here; we must leave that to the ordered extent completion.
*/
if (!inode_evicting)
clear_extent_bit(tree, start, end,
- EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
+ EXTENT_DELALLOC |
EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, 1, 0, &cached_state);
/*
@@ -8237,8 +8222,10 @@ again:
if (btrfs_dec_test_ordered_pending(inode, &ordered,
start,
- end - start + 1, 1))
+ end - start + 1, 1)) {
btrfs_finish_ordered_io(ordered);
+ completed_ordered = true;
+ }
}
btrfs_put_ordered_extent(ordered);
if (!inode_evicting) {
@@ -8267,10 +8254,23 @@ again:
*/
btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
if (!inode_evicting) {
+ bool delete = true;
+
+ /*
+ * If there's an ordered extent for this range and we have not
+ * finished it ourselves, we must leave EXTENT_DELALLOC_NEW set
+ * in the range for the ordered extent completion. We must also
+ * not delete the range, otherwise we would lose that bit (and
+ * any other bits set in the range). Make sure EXTENT_UPTODATE
+ * is cleared if we don't delete, otherwise it can lead to
+ * corruption if the i_size is extended later.
+ */
+ if (found_ordered && !completed_ordered)
+ delete = false;
clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED |
- EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
- EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
- &cached_state);
+ EXTENT_DELALLOC | EXTENT_UPTODATE |
+ EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1,
+ delete, &cached_state);
__btrfs_releasepage(page, GFP_NOFS);
}
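The two booleans added above feed a single decision: whether clear_extent_bit() may delete the whole range. As an illustrative sketch only (this standalone helper, decide_range_delete(), is hypothetical; the real code inlines the check):

static bool decide_range_delete(bool found_ordered, bool completed_ordered)
{
	/*
	 * An ordered extent covers the range but someone else will complete
	 * it: EXTENT_DELALLOC_NEW must survive until that completion, so
	 * the range must not be deleted (EXTENT_UPTODATE is still cleared
	 * explicitly to avoid corruption if i_size is extended later).
	 */
	if (found_ordered && !completed_ordered)
		return false;

	/* No ordered extent, or we completed it ourselves: safe to delete. */
	return true;
}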
@@ -8519,14 +8519,14 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback)
trans->block_rsv = rsv;
while (1) {
- ret = btrfs_truncate_inode_items(trans, root, inode,
+ ret = btrfs_truncate_inode_items(trans, root, BTRFS_I(inode),
inode->i_size,
BTRFS_EXTENT_DATA_KEY);
trans->block_rsv = &fs_info->trans_block_rsv;
if (ret != -ENOSPC && ret != -EAGAIN)
break;
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
if (ret)
break;
@@ -8557,7 +8557,7 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback)
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
- ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
+ ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
if (ret)
goto out;
trans = btrfs_start_transaction(root, 1);
@@ -8565,14 +8565,14 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback)
ret = PTR_ERR(trans);
goto out;
}
- btrfs_inode_safe_disk_i_size_write(inode, 0);
+ btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
}
if (trans) {
int ret2;
trans->block_rsv = &fs_info->trans_block_rsv;
- ret2 = btrfs_update_inode(trans, root, inode);
+ ret2 = btrfs_update_inode(trans, root, BTRFS_I(inode));
if (ret2 && !ret)
ret = ret2;
@@ -8618,7 +8618,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
"error inheriting subvolume %llu properties: %d",
new_root->root_key.objectid, err);
- err = btrfs_update_inode(trans, new_root, inode);
+ err = btrfs_update_inode(trans, new_root, BTRFS_I(inode));
iput(inode);
return err;
@@ -8680,7 +8680,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
INIT_LIST_HEAD(&ei->delalloc_inodes);
INIT_LIST_HEAD(&ei->delayed_iput);
RB_CLEAR_NODE(&ei->rb_node);
- init_rwsem(&ei->dio_sem);
return inode;
}
@@ -8820,6 +8819,7 @@ static int btrfs_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
u64 delalloc_bytes;
+ u64 inode_bytes;
struct inode *inode = d_inode(path->dentry);
u32 blocksize = inode->i_sb->s_blocksize;
u32 bi_flags = BTRFS_I(inode)->flags;
@@ -8846,8 +8846,9 @@ static int btrfs_getattr(const struct path *path, struct kstat *stat,
spin_lock(&BTRFS_I(inode)->lock);
delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
+ inode_bytes = inode_get_bytes(inode);
spin_unlock(&BTRFS_I(inode)->lock);
- stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
+ stat->blocks = (ALIGN(inode_bytes, blocksize) +
ALIGN(delalloc_bytes, blocksize)) >> 9;
return 0;
}
@@ -8973,7 +8974,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
old_dentry->d_name.name,
old_dentry->d_name.len);
if (!ret)
- ret = btrfs_update_inode(trans, root, old_inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
}
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -8989,7 +8990,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
new_dentry->d_name.name,
new_dentry->d_name.len);
if (!ret)
- ret = btrfs_update_inode(trans, dest, new_inode);
+ ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode));
}
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -9078,7 +9079,7 @@ static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans,
u64 objectid;
u64 index;
- ret = btrfs_find_free_ino(root, &objectid);
+ ret = btrfs_find_free_objectid(root, &objectid);
if (ret)
return ret;
@@ -9109,7 +9110,7 @@ static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans,
if (ret)
goto out;
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
out:
unlock_new_inode(inode);
if (ret)
@@ -9243,7 +9244,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
old_dentry->d_name.name,
old_dentry->d_name.len);
if (!ret)
- ret = btrfs_update_inode(trans, root, old_inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
}
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -9541,7 +9542,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
if (IS_ERR(trans))
return PTR_ERR(trans);
- err = btrfs_find_free_ino(root, &objectid);
+ err = btrfs_find_free_objectid(root, &objectid);
if (err)
goto out_unlock;
@@ -9603,7 +9604,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
inode_nohighmem(inode);
inode_set_bytes(inode, name_len);
btrfs_i_size_write(BTRFS_I(inode), name_len);
- err = btrfs_update_inode(trans, root, inode);
+ err = btrfs_update_inode(trans, root, BTRFS_I(inode));
/*
* Last step, add directory indexes for our symlink inode. This is the
* last step to avoid extra cleanup of these indexes if an error happens
@@ -9629,7 +9630,8 @@ out_unlock:
static struct btrfs_trans_handle *insert_prealloc_file_extent(
struct btrfs_trans_handle *trans_in,
- struct inode *inode, struct btrfs_key *ins,
+ struct btrfs_inode *inode,
+ struct btrfs_key *ins,
u64 file_offset)
{
struct btrfs_file_extent_item stack_fi;
@@ -9650,13 +9652,14 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
/* Encryption and other encoding is reserved and all 0 */
- ret = btrfs_qgroup_release_data(BTRFS_I(inode), file_offset, len);
+ ret = btrfs_qgroup_release_data(inode, file_offset, len);
if (ret < 0)
return ERR_PTR(ret);
if (trans) {
- ret = insert_reserved_file_extent(trans, BTRFS_I(inode),
- file_offset, &stack_fi, ret);
+ ret = insert_reserved_file_extent(trans, inode,
+ file_offset, &stack_fi,
+ true, ret);
if (ret)
return ERR_PTR(ret);
return trans;
@@ -9676,7 +9679,7 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
if (!path)
return ERR_PTR(-ENOMEM);
- ret = btrfs_replace_file_extents(inode, path, file_offset,
+ ret = btrfs_replace_file_extents(&inode->vfs_inode, path, file_offset,
file_offset + len - 1, &extent_info,
&trans);
btrfs_free_path(path);
@@ -9732,7 +9735,8 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
clear_offset += ins.offset;
last_alloc = ins.offset;
- trans = insert_prealloc_file_extent(trans, inode, &ins, cur_offset);
+ trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
+ &ins, cur_offset);
/*
* Now that we inserted the prealloc extent we can finally
* decrement the number of reservations in the block group.
@@ -9794,10 +9798,10 @@ next:
else
i_size = cur_offset;
i_size_write(inode, i_size);
- btrfs_inode_safe_disk_i_size_write(inode, 0);
+ btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
}
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -9872,7 +9876,7 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_find_free_ino(root, &objectid);
+ ret = btrfs_find_free_objectid(root, &objectid);
if (ret)
goto out;
@@ -9893,7 +9897,7 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
if (ret)
goto out;
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
if (ret)
goto out;
ret = btrfs_orphan_add(trans, BTRFS_I(inode));
@@ -10272,6 +10276,27 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
}
#endif
+/*
+ * Update the number of bytes used in the VFS' inode. When we replace extents in
+ * a range (clone, dedupe, fallocate's zero range), we must update the number of
+ * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
+ * always get a correct value.
+ */
+void btrfs_update_inode_bytes(struct btrfs_inode *inode,
+ const u64 add_bytes,
+ const u64 del_bytes)
+{
+ if (add_bytes == del_bytes)
+ return;
+
+ spin_lock(&inode->lock);
+ if (del_bytes > 0)
+ inode_sub_bytes(&inode->vfs_inode, del_bytes);
+ if (add_bytes > 0)
+ inode_add_bytes(&inode->vfs_inode, add_bytes);
+ spin_unlock(&inode->lock);
+}
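For illustration, this is how a caller replacing extents is expected to use the helper. The sketch below is hypothetical and simply mirrors the call added to clone_copy_inline_extent() in the reflink.c hunk later in this patch, where 'datal' bytes are inserted and 'bytes_found' bytes of dropped extents come back from btrfs_drop_extents():

/* Hypothetical sketch of a clone-style caller. */
static void clone_update_bytes(struct btrfs_inode *dst, u64 datal,
			       u64 bytes_found)
{
	/* One atomic adjustment instead of separate sub/add calls. */
	btrfs_update_inode_bytes(dst, datal, bytes_found);
}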
+
static const struct inode_operations btrfs_dir_inode_operations = {
.getattr = btrfs_getattr,
.lookup = btrfs_lookup,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 69a384145dc6..703212ff50a5 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -34,7 +34,6 @@
#include "print-tree.h"
#include "volumes.h"
#include "locking.h"
-#include "inode-map.h"
#include "backref.h"
#include "rcu-string.h"
#include "send.h"
@@ -193,6 +192,15 @@ static int check_fsflags(unsigned int old_flags, unsigned int flags)
return 0;
}
+static int check_fsflags_compatible(struct btrfs_fs_info *fs_info,
+ unsigned int flags)
+{
+ if (btrfs_is_zoned(fs_info) && (flags & FS_NOCOW_FL))
+ return -EPERM;
+
+ return 0;
+}
+
static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
@@ -230,6 +238,10 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
if (ret)
goto out_unlock;
+ ret = check_fsflags_compatible(fs_info, fsflags);
+ if (ret)
+ goto out_unlock;
+
binode_flags = binode->flags;
if (fsflags & FS_SYNC_FL)
binode_flags |= BTRFS_INODE_SYNC;
@@ -336,7 +348,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
btrfs_sync_inode_flags_to_i_flags(inode);
inode_inc_iversion(inode);
inode->i_ctime = current_time(inode);
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
out_end_trans:
btrfs_end_transaction(trans);
@@ -479,7 +491,7 @@ static int btrfs_ioctl_fssetxattr(struct file *file, void __user *arg)
btrfs_sync_inode_flags_to_i_flags(inode);
inode_inc_iversion(inode);
inode->i_ctime = current_time(inode);
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
btrfs_end_transaction(trans);
@@ -733,7 +745,7 @@ static noinline int create_subvol(struct inode *dir,
}
btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2);
- ret = btrfs_update_inode(trans, root, dir);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
if (ret) {
btrfs_abort_transaction(trans, ret);
goto fail;
@@ -1275,6 +1287,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
u64 page_end;
u64 page_cnt;
u64 start = (u64)start_index << PAGE_SHIFT;
+ u64 search_start;
int ret;
int i;
int i_done;
@@ -1371,6 +1384,40 @@ again:
lock_extent_bits(&BTRFS_I(inode)->io_tree,
page_start, page_end - 1, &cached_state);
+
+ /*
+ * When defragmenting we skip ranges that have holes or inline extents
+ * (see should_defrag_range()), to avoid unnecessary IO and wasting
+ * space. At btrfs_defrag_file(), we check if a range should be defragged
+ * before locking the inode and then, if it should, we trigger a sync
+ * page cache readahead - we lock the inode only after that to avoid
+ * blocking for too long other tasks that possibly want to operate on
+ * other file ranges. But before we were able to get the inode lock,
+ * some other task may have punched a hole in the range, or we may have
+ * now an inline extent, in which case we should not defrag. So check
+ * for that here, where we have the inode and the range locked, and bail
+ * out if that happened.
+ */
+ search_start = page_start;
+ while (search_start < page_end) {
+ struct extent_map *em;
+
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, search_start,
+ page_end - search_start);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto out_unlock_range;
+ }
+ if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+ free_extent_map(em);
+ /* Ok, 0 means we did not defrag anything */
+ ret = 0;
+ goto out_unlock_range;
+ }
+ search_start = extent_map_end(em);
+ free_extent_map(em);
+ }
+
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
page_end - 1, EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, 0, 0, &cached_state);
@@ -1401,6 +1448,10 @@ again:
btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
extent_changeset_free(data_reserved);
return i_done;
+
+out_unlock_range:
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+ page_start, page_end - 1, &cached_state);
out:
for (i = 0; i < i_done; i++) {
unlock_page(pages[i]);
@@ -1678,7 +1729,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
btrfs_info(fs_info, "resizing devid %llu", devid);
}
- device = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
+ device = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);
if (!device) {
btrfs_info(fs_info, "resizer unable to find device %llu",
devid);
@@ -3321,7 +3372,7 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
rcu_read_lock();
dev = btrfs_find_device(fs_info->fs_devices, di_args->devid, s_uuid,
- NULL, true);
+ NULL);
if (!dev) {
ret = -ENODEV;
@@ -3393,7 +3444,6 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
ret = -ENOMEM;
goto out_free;
}
- path->leave_spinning = 1;
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 66e02ebdd340..5fafc5e89bb7 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -17,404 +17,89 @@
* Extent buffer locking
* =====================
*
- * The locks use a custom scheme that allows doing more operations than are
- * available from current locking primitives. The building blocks are still
- * rwlock and wait queues.
- *
- * Required semantics:
+ * We use a rw_semaphore for tree locking, and the semantics are exactly the
+ * same:
*
* - reader/writer exclusion
* - writer/writer exclusion
* - reader/reader sharing
- * - spinning lock semantics
- * - blocking lock semantics
* - try-lock semantics for readers and writers
- * - one level nesting, allowing read lock to be taken by the same thread that
- * already has write lock
- *
- * The extent buffer locks (also called tree locks) manage access to eb data
- * related to the storage in the b-tree (keys, items, but not the individual
- * members of eb).
- * We want concurrency of many readers and safe updates. The underlying locking
- * is done by read-write spinlock and the blocking part is implemented using
- * counters and wait queues.
- *
- * spinning semantics - the low-level rwlock is held so all other threads that
- * want to take it are spinning on it.
- *
- * blocking semantics - the low-level rwlock is not held but the counter
- * denotes how many times the blocking lock was held;
- * sleeping is possible
- *
- * Write lock always allows only one thread to access the data.
- *
- *
- * Debugging
- * ---------
- *
- * There are additional state counters that are asserted in various contexts,
- * removed from non-debug build to reduce extent_buffer size and for
- * performance reasons.
- *
- *
- * Lock recursion
- * --------------
- *
- * A write operation on a tree might indirectly start a look up on the same
- * tree. This can happen when btrfs_cow_block locks the tree and needs to
- * lookup free extents.
- *
- * btrfs_cow_block
- * ..
- * alloc_tree_block_no_bg_flush
- * btrfs_alloc_tree_block
- * btrfs_reserve_extent
- * ..
- * load_free_space_cache
- * ..
- * btrfs_lookup_file_extent
- * btrfs_search_slot
- *
- *
- * Locking pattern - spinning
- * --------------------------
- *
- * The simple locking scenario, the +--+ denotes the spinning section.
- *
- * +- btrfs_tree_lock
- * | - extent_buffer::rwlock is held
- * | - no heavy operations should happen, eg. IO, memory allocations, large
- * | structure traversals
- * +- btrfs_tree_unlock
- *
- *
- * Locking pattern - blocking
- * --------------------------
- *
- * The blocking write uses the following scheme. The +--+ denotes the spinning
- * section.
- *
- * +- btrfs_tree_lock
- * |
- * +- btrfs_set_lock_blocking_write
- *
- * - allowed: IO, memory allocations, etc.
- *
- * -- btrfs_tree_unlock - note, no explicit unblocking necessary
- *
- *
- * Blocking read is similar.
- *
- * +- btrfs_tree_read_lock
- * |
- * +- btrfs_set_lock_blocking_read
- *
- * - heavy operations allowed
- *
- * +- btrfs_tree_read_unlock_blocking
- * |
- * +- btrfs_tree_read_unlock
- *
- */
-
-#ifdef CONFIG_BTRFS_DEBUG
-static inline void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
-{
- WARN_ON(eb->spinning_writers);
- eb->spinning_writers++;
-}
-
-static inline void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
-{
- WARN_ON(eb->spinning_writers != 1);
- eb->spinning_writers--;
-}
-
-static inline void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
-{
- WARN_ON(eb->spinning_writers);
-}
-
-static inline void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
-{
- atomic_inc(&eb->spinning_readers);
-}
-
-static inline void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
-{
- WARN_ON(atomic_read(&eb->spinning_readers) == 0);
- atomic_dec(&eb->spinning_readers);
-}
-
-static inline void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
-{
- atomic_inc(&eb->read_locks);
-}
-
-static inline void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
-{
- atomic_dec(&eb->read_locks);
-}
-
-static inline void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
-{
- BUG_ON(!atomic_read(&eb->read_locks));
-}
-
-static inline void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
-{
- eb->write_locks++;
-}
-
-static inline void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
-{
- eb->write_locks--;
-}
-
-#else
-static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
-static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
-static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
-static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
-static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
-static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
-static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
-static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
-static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
-static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
-#endif
-
-/*
- * Mark already held read lock as blocking. Can be nested in write lock by the
- * same thread.
- *
- * Use when there are potentially long operations ahead so other thread waiting
- * on the lock will not actively spin but sleep instead.
- *
- * The rwlock is released and blocking reader counter is increased.
- */
-void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
-{
- trace_btrfs_set_lock_blocking_read(eb);
- /*
- * No lock is required. The lock owner may change if we have a read
- * lock, but it won't change to or away from us. If we have the write
- * lock, we are the owner and it'll never change.
- */
- if (eb->lock_recursed && current->pid == eb->lock_owner)
- return;
- btrfs_assert_tree_read_locked(eb);
- atomic_inc(&eb->blocking_readers);
- btrfs_assert_spinning_readers_put(eb);
- read_unlock(&eb->lock);
-}
-
-/*
- * Mark already held write lock as blocking.
- *
- * Use when there are potentially long operations ahead so other threads
- * waiting on the lock will not actively spin but sleep instead.
*
- * The rwlock is released and blocking writers is set.
+ * The rwsem implementation does opportunistic spinning, which reduces the
+ * number of times the locking task needs to sleep.
*/
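The practical effect on callers is that the old two-step spin-then-block pattern collapses into a plain lock/unlock pair. A minimal before/after sketch (illustrative only):

/* Before: switch to blocking mode before doing anything that sleeps. */
btrfs_tree_lock(eb);
btrfs_set_lock_blocking_write(eb);
/* ... IO, memory allocations, etc. ... */
btrfs_tree_unlock(eb);

/* After: the rwsem sleeps on its own, no mode switch is needed. */
btrfs_tree_lock(eb);
/* ... IO, memory allocations, etc. ... */
btrfs_tree_unlock(eb);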
-void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
-{
- trace_btrfs_set_lock_blocking_write(eb);
- /*
- * No lock is required. The lock owner may change if we have a read
- * lock, but it won't change to or away from us. If we have the write
- * lock, we are the owner and it'll never change.
- */
- if (eb->lock_recursed && current->pid == eb->lock_owner)
- return;
- if (eb->blocking_writers == 0) {
- btrfs_assert_spinning_writers_put(eb);
- btrfs_assert_tree_locked(eb);
- WRITE_ONCE(eb->blocking_writers, 1);
- write_unlock(&eb->lock);
- }
-}
/*
- * Lock the extent buffer for read. Wait for any writers (spinning or blocking).
- * Can be nested in write lock by the same thread.
+ * __btrfs_tree_read_lock - lock extent buffer for read
+ * @eb: the eb to be locked
+ * @nest: the nesting level to be used for lockdep
*
- * Use when the locked section does only lightweight actions and busy waiting
- * would be cheaper than making other threads do the wait/wake loop.
- *
- * The rwlock is held upon exit.
+ * This takes the read lock on the extent buffer, using the specified nesting
+ * level for lockdep purposes.
*/
-void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest,
- bool recurse)
+void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
{
u64 start_ns = 0;
if (trace_btrfs_tree_read_lock_enabled())
start_ns = ktime_get_ns();
-again:
- read_lock(&eb->lock);
- BUG_ON(eb->blocking_writers == 0 &&
- current->pid == eb->lock_owner);
- if (eb->blocking_writers) {
- if (current->pid == eb->lock_owner) {
- /*
- * This extent is already write-locked by our thread.
- * We allow an additional read lock to be added because
- * it's for the same thread. btrfs_find_all_roots()
- * depends on this as it may be called on a partly
- * (write-)locked tree.
- */
- WARN_ON(!recurse);
- BUG_ON(eb->lock_recursed);
- eb->lock_recursed = true;
- read_unlock(&eb->lock);
- trace_btrfs_tree_read_lock(eb, start_ns);
- return;
- }
- read_unlock(&eb->lock);
- wait_event(eb->write_lock_wq,
- READ_ONCE(eb->blocking_writers) == 0);
- goto again;
- }
- btrfs_assert_tree_read_locks_get(eb);
- btrfs_assert_spinning_readers_get(eb);
+
+ down_read_nested(&eb->lock, nest);
+ eb->lock_owner = current->pid;
trace_btrfs_tree_read_lock(eb, start_ns);
}
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
- __btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL, false);
-}
-
-/*
- * Lock extent buffer for read, optimistically expecting that there are no
- * contending blocking writers. If there are, don't wait.
- *
- * Return 1 if the rwlock has been taken, 0 otherwise
- */
-int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
-{
- if (READ_ONCE(eb->blocking_writers))
- return 0;
-
- read_lock(&eb->lock);
- /* Refetch value after lock */
- if (READ_ONCE(eb->blocking_writers)) {
- read_unlock(&eb->lock);
- return 0;
- }
- btrfs_assert_tree_read_locks_get(eb);
- btrfs_assert_spinning_readers_get(eb);
- trace_btrfs_tree_read_lock_atomic(eb);
- return 1;
+ __btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL);
}
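A short usage sketch of the nesting parameter (illustrative; child_eb is a hypothetical second buffer locked while the first is held, and BTRFS_NESTING_COW is one of the subclasses defined in locking.h):

/* Plain read lock through the wrapper. */
btrfs_tree_read_lock(eb);

/*
 * Locking a second extent buffer while the first is held: pass a
 * distinct lockdep nesting level so lockdep does not report a false
 * positive deadlock between the two eb->lock instances.
 */
__btrfs_tree_read_lock(child_eb, BTRFS_NESTING_COW);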
/*
- * Try-lock for read. Don't block or wait for contending writers.
+ * Try-lock for read.
*
 * Return 1 if the rwlock has been taken, 0 otherwise
*/
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
- if (READ_ONCE(eb->blocking_writers))
- return 0;
-
- if (!read_trylock(&eb->lock))
- return 0;
-
- /* Refetch value after lock */
- if (READ_ONCE(eb->blocking_writers)) {
- read_unlock(&eb->lock);
- return 0;
+ if (down_read_trylock(&eb->lock)) {
+ eb->lock_owner = current->pid;
+ trace_btrfs_try_tree_read_lock(eb);
+ return 1;
}
- btrfs_assert_tree_read_locks_get(eb);
- btrfs_assert_spinning_readers_get(eb);
- trace_btrfs_try_tree_read_lock(eb);
- return 1;
+ return 0;
}
/*
- * Try-lock for write. May block until the lock is uncontended, but does not
- * wait until it is free.
+ * Try-lock for write.
*
 * Return 1 if the rwlock has been taken, 0 otherwise
*/
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
- if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers))
- return 0;
-
- write_lock(&eb->lock);
- /* Refetch value after lock */
- if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers)) {
- write_unlock(&eb->lock);
- return 0;
+ if (down_write_trylock(&eb->lock)) {
+ eb->lock_owner = current->pid;
+ trace_btrfs_try_tree_write_lock(eb);
+ return 1;
}
- btrfs_assert_tree_write_locks_get(eb);
- btrfs_assert_spinning_writers_get(eb);
- eb->lock_owner = current->pid;
- trace_btrfs_try_tree_write_lock(eb);
- return 1;
+ return 0;
}
/*
- * Release read lock. Must be used only if the lock is in spinning mode. If
- * the read lock is nested, must pair with read lock before the write unlock.
- *
- * The rwlock is not held upon exit.
+ * Release read lock.
*/
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
trace_btrfs_tree_read_unlock(eb);
- /*
- * if we're nested, we have the write lock. No new locking
- * is needed as long as we are the lock owner.
- * The write unlock will do a barrier for us, and the lock_recursed
- * field only matters to the lock owner.
- */
- if (eb->lock_recursed && current->pid == eb->lock_owner) {
- eb->lock_recursed = false;
- return;
- }
- btrfs_assert_tree_read_locked(eb);
- btrfs_assert_spinning_readers_put(eb);
- btrfs_assert_tree_read_locks_put(eb);
- read_unlock(&eb->lock);
-}
-
-/*
- * Release read lock, previously set to blocking by a pairing call to
- * btrfs_set_lock_blocking_read(). Can be nested in write lock by the same
- * thread.
- *
- * State of rwlock is unchanged, last reader wakes waiting threads.
- */
-void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
-{
- trace_btrfs_tree_read_unlock_blocking(eb);
- /*
- * if we're nested, we have the write lock. No new locking
- * is needed as long as we are the lock owner.
- * The write unlock will do a barrier for us, and the lock_recursed
- * field only matters to the lock owner.
- */
- if (eb->lock_recursed && current->pid == eb->lock_owner) {
- eb->lock_recursed = false;
- return;
- }
- btrfs_assert_tree_read_locked(eb);
- WARN_ON(atomic_read(&eb->blocking_readers) == 0);
- /* atomic_dec_and_test implies a barrier */
- if (atomic_dec_and_test(&eb->blocking_readers))
- cond_wake_up_nomb(&eb->read_lock_wq);
- btrfs_assert_tree_read_locks_put(eb);
+ eb->lock_owner = 0;
+ up_read(&eb->lock);
}
/*
- * Lock for write. Wait for all blocking and spinning readers and writers. This
- * starts context where reader lock could be nested by the same thread.
+ * __btrfs_tree_lock - lock eb for write
+ * @eb: the eb to lock
+ * @nest: the nesting to use for the lock
*
- * The rwlock is held for write upon exit.
+ * Returns with the eb->lock write locked.
*/
void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
__acquires(&eb->lock)
@@ -424,19 +109,7 @@ void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
if (trace_btrfs_tree_lock_enabled())
start_ns = ktime_get_ns();
- WARN_ON(eb->lock_owner == current->pid);
-again:
- wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
- wait_event(eb->write_lock_wq, READ_ONCE(eb->blocking_writers) == 0);
- write_lock(&eb->lock);
- /* Refetch value after lock */
- if (atomic_read(&eb->blocking_readers) ||
- READ_ONCE(eb->blocking_writers)) {
- write_unlock(&eb->lock);
- goto again;
- }
- btrfs_assert_spinning_writers_get(eb);
- btrfs_assert_tree_write_locks_get(eb);
+ down_write_nested(&eb->lock, nest);
eb->lock_owner = current->pid;
trace_btrfs_tree_lock(eb, start_ns);
}
@@ -447,68 +120,13 @@ void btrfs_tree_lock(struct extent_buffer *eb)
}
/*
- * Release the write lock, either blocking or spinning (ie. there's no need
- * for an explicit blocking unlock, like btrfs_tree_read_unlock_blocking).
- * This also ends the context for nesting, the read lock must have been
- * released already.
- *
- * Tasks blocked and waiting are woken, rwlock is not held upon exit.
+ * Release the write lock.
*/
void btrfs_tree_unlock(struct extent_buffer *eb)
{
- /*
- * This is read both locked and unlocked but always by the same thread
- * that already owns the lock so we don't need to use READ_ONCE
- */
- int blockers = eb->blocking_writers;
-
- BUG_ON(blockers > 1);
-
- btrfs_assert_tree_locked(eb);
trace_btrfs_tree_unlock(eb);
eb->lock_owner = 0;
- btrfs_assert_tree_write_locks_put(eb);
-
- if (blockers) {
- btrfs_assert_no_spinning_writers(eb);
- /* Unlocked write */
- WRITE_ONCE(eb->blocking_writers, 0);
- /*
- * We need to order modifying blocking_writers above with
- * actually waking up the sleepers to ensure they see the
- * updated value of blocking_writers
- */
- cond_wake_up(&eb->write_lock_wq);
- } else {
- btrfs_assert_spinning_writers_put(eb);
- write_unlock(&eb->lock);
- }
-}
-
-/*
- * Set all locked nodes in the path to blocking locks. This should be done
- * before scheduling
- */
-void btrfs_set_path_blocking(struct btrfs_path *p)
-{
- int i;
-
- for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
- if (!p->nodes[i] || !p->locks[i])
- continue;
- /*
- * If we currently have a spinning reader or writer lock this
- * will bump the count of blocking holders and drop the
- * spinlock.
- */
- if (p->locks[i] == BTRFS_READ_LOCK) {
- btrfs_set_lock_blocking_read(p->nodes[i]);
- p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
- } else if (p->locks[i] == BTRFS_WRITE_LOCK) {
- btrfs_set_lock_blocking_write(p->nodes[i]);
- p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
- }
- }
+ up_write(&eb->lock);
}
/*
@@ -564,14 +182,13 @@ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
*
* Return: root extent buffer with read lock held
*/
-struct extent_buffer *__btrfs_read_lock_root_node(struct btrfs_root *root,
- bool recurse)
+struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
struct extent_buffer *eb;
while (1) {
eb = btrfs_root_node(root);
- __btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL, recurse);
+ btrfs_tree_read_lock(eb);
if (eb == root->node)
break;
btrfs_tree_read_unlock(eb);
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 3ea81ed3320b..a2e1f1f5c6e3 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -13,8 +13,6 @@
#define BTRFS_WRITE_LOCK 1
#define BTRFS_READ_LOCK 2
-#define BTRFS_WRITE_LOCK_BLOCKING 3
-#define BTRFS_READ_LOCK_BLOCKING 4
/*
* We are limited in number of subclasses by MAX_LOCKDEP_SUBCLASSES, which at
@@ -89,42 +87,28 @@ void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
void btrfs_tree_lock(struct extent_buffer *eb);
void btrfs_tree_unlock(struct extent_buffer *eb);
-void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest,
- bool recurse);
+void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
void btrfs_tree_read_lock(struct extent_buffer *eb);
void btrfs_tree_read_unlock(struct extent_buffer *eb);
-void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
-void btrfs_set_lock_blocking_read(struct extent_buffer *eb);
-void btrfs_set_lock_blocking_write(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
int btrfs_try_tree_write_lock(struct extent_buffer *eb);
-int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
-struct extent_buffer *__btrfs_read_lock_root_node(struct btrfs_root *root,
- bool recurse);
-
-static inline struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
-{
- return __btrfs_read_lock_root_node(root, false);
-}
+struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root);
#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) {
- BUG_ON(!eb->write_locks);
+ lockdep_assert_held(&eb->lock);
}
#else
static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
#endif
-void btrfs_set_path_blocking(struct btrfs_path *p);
void btrfs_unlock_up_safe(struct btrfs_path *path, int level);
static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
{
- if (rw == BTRFS_WRITE_LOCK || rw == BTRFS_WRITE_LOCK_BLOCKING)
+ if (rw == BTRFS_WRITE_LOCK)
btrfs_tree_unlock(eb);
- else if (rw == BTRFS_READ_LOCK_BLOCKING)
- btrfs_tree_read_unlock_blocking(eb);
else if (rw == BTRFS_READ_LOCK)
btrfs_tree_read_unlock(eb);
else
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 87bac9ecdf4c..79d366a36223 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -855,51 +855,6 @@ out:
}
/*
- * search the ordered extents for one corresponding to 'offset' and
- * try to find a checksum. This is used because we allow pages to
- * be reclaimed before their checksum is actually put into the btree
- */
-int btrfs_find_ordered_sum(struct btrfs_inode *inode, u64 offset,
- u64 disk_bytenr, u8 *sum, int len)
-{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct btrfs_ordered_sum *ordered_sum;
- struct btrfs_ordered_extent *ordered;
- struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
- unsigned long num_sectors;
- unsigned long i;
- u32 sectorsize = btrfs_inode_sectorsize(inode);
- const u8 blocksize_bits = inode->vfs_inode.i_sb->s_blocksize_bits;
- const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
- int index = 0;
-
- ordered = btrfs_lookup_ordered_extent(inode, offset);
- if (!ordered)
- return 0;
-
- spin_lock_irq(&tree->lock);
- list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
- if (disk_bytenr >= ordered_sum->bytenr &&
- disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
- i = (disk_bytenr - ordered_sum->bytenr) >> blocksize_bits;
- num_sectors = ordered_sum->len >> blocksize_bits;
- num_sectors = min_t(int, len - index, num_sectors - i);
- memcpy(sum + index, ordered_sum->sums + i * csum_size,
- num_sectors * csum_size);
-
- index += (int)num_sectors * csum_size;
- if (index == len)
- goto out;
- disk_bytenr += num_sectors * sectorsize;
- }
- }
-out:
- spin_unlock_irq(&tree->lock);
- btrfs_put_ordered_extent(ordered);
- return index;
-}
-
-/*
 * btrfs_flush_ordered_range - Lock the passed range and ensure all pending
* ordered extents in it are run to completion.
*
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index c3a2325e64a4..0bfa82b58e23 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -137,9 +137,8 @@ static inline int btrfs_ordered_sum_size(struct btrfs_fs_info *fs_info,
unsigned long bytes)
{
int num_sectors = (int)DIV_ROUND_UP(bytes, fs_info->sectorsize);
- int csum_size = btrfs_super_csum_size(fs_info->super_copy);
- return sizeof(struct btrfs_ordered_sum) + num_sectors * csum_size;
+ return sizeof(struct btrfs_ordered_sum) + num_sectors * fs_info->csum_size;
}
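Worked example of the size computation (assuming a 4 KiB sectorsize and crc32c checksums, i.e. fs_info->csum_size == 4):

/*
 * For bytes == 16 KiB:
 *   num_sectors = DIV_ROUND_UP(16384, 4096) = 4
 *   size = sizeof(struct btrfs_ordered_sum) + 4 * 4
 * i.e. 16 bytes of checksum payload after the header.
 */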
static inline void
@@ -184,8 +183,6 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
u64 len);
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
struct list_head *list);
-int btrfs_find_ordered_sum(struct btrfs_inode *inode, u64 offset,
- u64 disk_bytenr, u8 *sum, int len);
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
const u64 range_start, const u64 range_len);
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 7695c4783d33..fe5e0026129d 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -177,8 +177,7 @@ static void print_uuid_item(struct extent_buffer *l, unsigned long offset,
__le64 subvol_id;
read_extent_buffer(l, &subvol_id, offset, sizeof(subvol_id));
- pr_info("\t\tsubvol_id %llu\n",
- (unsigned long long)le64_to_cpu(subvol_id));
+ pr_info("\t\tsubvol_id %llu\n", le64_to_cpu(subvol_id));
item_size -= sizeof(u64);
offset += sizeof(u64);
}
@@ -191,15 +190,8 @@ static void print_uuid_item(struct extent_buffer *l, unsigned long offset,
static void print_eb_refs_lock(struct extent_buffer *eb)
{
#ifdef CONFIG_BTRFS_DEBUG
- btrfs_info(eb->fs_info,
-"refs %u lock (w:%d r:%d bw:%d br:%d sw:%d sr:%d) lock_owner %u current %u",
- atomic_read(&eb->refs), eb->write_locks,
- atomic_read(&eb->read_locks),
- eb->blocking_writers,
- atomic_read(&eb->blocking_readers),
- eb->spinning_writers,
- atomic_read(&eb->spinning_readers),
- eb->lock_owner, current->pid);
+ btrfs_info(eb->fs_info, "refs %u lock_owner %u current %u",
+ atomic_read(&eb->refs), eb->lock_owner, current->pid);
#endif
}
@@ -398,6 +390,7 @@ void btrfs_print_tree(struct extent_buffer *c, bool follow)
btrfs_node_key_to_cpu(c, &first_key, i);
next = read_tree_block(fs_info, btrfs_node_blockptr(c, i),
+ btrfs_header_owner(c),
btrfs_node_ptr_generation(c, i),
level - 1, &first_key);
if (IS_ERR(next)) {
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 87bd37b70738..fe3046007f52 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -894,8 +894,6 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- path->leave_spinning = 1;
-
key.objectid = 0;
key.offset = 0;
key.type = 0;
@@ -1944,34 +1942,22 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
struct btrfs_key dst_key;
if (src_path->nodes[cur_level] == NULL) {
- struct btrfs_key first_key;
struct extent_buffer *eb;
int parent_slot;
- u64 child_gen;
- u64 child_bytenr;
eb = src_path->nodes[cur_level + 1];
parent_slot = src_path->slots[cur_level + 1];
- child_bytenr = btrfs_node_blockptr(eb, parent_slot);
- child_gen = btrfs_node_ptr_generation(eb, parent_slot);
- btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
- eb = read_tree_block(fs_info, child_bytenr, child_gen,
- cur_level, &first_key);
+ eb = btrfs_read_node_slot(eb, parent_slot);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
goto out;
- } else if (!extent_buffer_uptodate(eb)) {
- free_extent_buffer(eb);
- ret = -EIO;
- goto out;
}
src_path->nodes[cur_level] = eb;
btrfs_tree_read_lock(eb);
- btrfs_set_lock_blocking_read(eb);
- src_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
+ src_path->locks[cur_level] = BTRFS_READ_LOCK;
}
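Several hunks in this file (and in relocation.c and ref-verify.c below) replace the open-coded read_tree_block() sequence with btrfs_read_node_slot(). A rough sketch of the behavior the callers now rely on, folding in the blockptr/generation/first-key extraction and the uptodate check that each site used to open code (an approximation, not the actual ctree.c implementation):

static struct extent_buffer *read_node_slot_sketch(struct extent_buffer *parent,
						   int slot)
{
	struct btrfs_key first_key;
	struct extent_buffer *eb;

	btrfs_node_key_to_cpu(parent, &first_key, slot);
	eb = read_tree_block(parent->fs_info,
			     btrfs_node_blockptr(parent, slot),
			     btrfs_header_owner(parent),
			     btrfs_node_ptr_generation(parent, slot),
			     btrfs_header_level(parent) - 1, &first_key);
	if (IS_ERR(eb))
		return eb;
	if (!extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		return ERR_PTR(-EIO);
	}
	return eb;
}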
src_path->slots[cur_level] = dst_path->slots[cur_level];
@@ -2066,10 +2052,8 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
/* Read the tree block if needed */
if (dst_path->nodes[cur_level] == NULL) {
- struct btrfs_key first_key;
int parent_slot;
u64 child_gen;
- u64 child_bytenr;
/*
* dst_path->nodes[root_level] must be initialized before
@@ -2088,31 +2072,23 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
*/
eb = dst_path->nodes[cur_level + 1];
parent_slot = dst_path->slots[cur_level + 1];
- child_bytenr = btrfs_node_blockptr(eb, parent_slot);
child_gen = btrfs_node_ptr_generation(eb, parent_slot);
- btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
/* This node is old, no need to trace */
if (child_gen < last_snapshot)
goto out;
- eb = read_tree_block(fs_info, child_bytenr, child_gen,
- cur_level, &first_key);
+ eb = btrfs_read_node_slot(eb, parent_slot);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
goto out;
- } else if (!extent_buffer_uptodate(eb)) {
- free_extent_buffer(eb);
- ret = -EIO;
- goto out;
}
dst_path->nodes[cur_level] = eb;
dst_path->slots[cur_level] = 0;
btrfs_tree_read_lock(eb);
- btrfs_set_lock_blocking_read(eb);
- dst_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
+ dst_path->locks[cur_level] = BTRFS_READ_LOCK;
need_cleanup = true;
}
@@ -2256,38 +2232,28 @@ walk_down:
level = root_level;
while (level >= 0) {
if (path->nodes[level] == NULL) {
- struct btrfs_key first_key;
int parent_slot;
- u64 child_gen;
u64 child_bytenr;
/*
- * We need to get child blockptr/gen from parent before
- * we can read it.
+ * We need to get child blockptr from parent before we
+ * can read it.
*/
eb = path->nodes[level + 1];
parent_slot = path->slots[level + 1];
child_bytenr = btrfs_node_blockptr(eb, parent_slot);
- child_gen = btrfs_node_ptr_generation(eb, parent_slot);
- btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
- eb = read_tree_block(fs_info, child_bytenr, child_gen,
- level, &first_key);
+ eb = btrfs_read_node_slot(eb, parent_slot);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
goto out;
- } else if (!extent_buffer_uptodate(eb)) {
- free_extent_buffer(eb);
- ret = -EIO;
- goto out;
}
path->nodes[level] = eb;
path->slots[level] = 0;
btrfs_tree_read_lock(eb);
- btrfs_set_lock_blocking_read(eb);
- path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
+ path->locks[level] = BTRFS_READ_LOCK;
ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
fs_info->nodesize,
@@ -4242,7 +4208,7 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
spin_unlock(&blocks->lock);
/* Read out reloc subtree root */
- reloc_eb = read_tree_block(fs_info, block->reloc_bytenr,
+ reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, 0,
block->reloc_generation, block->level,
&block->first_key);
if (IS_ERR(reloc_eb)) {
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 255490f42b5d..93fbf87bdc8d 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1097,7 +1097,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
/* see if we can add this page onto our existing bio */
if (last) {
- u64 last_end = (u64)last->bi_iter.bi_sector << 9;
+ u64 last_end = last->bi_iter.bi_sector << 9;
last_end += last->bi_iter.bi_size;
/*
@@ -1163,7 +1163,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
struct bvec_iter iter;
int i = 0;
- start = (u64)bio->bi_iter.bi_sector << 9;
+ start = bio->bi_iter.bi_sector << 9;
stripe_offset = start - rbio->bbio->raid_map[0];
page_index = stripe_offset >> PAGE_SHIFT;
@@ -1374,7 +1374,7 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio,
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
struct bio *bio)
{
- u64 logical = (u64)bio->bi_iter.bi_sector << 9;
+ u64 logical = bio->bi_iter.bi_sector << 9;
int i;
for (i = 0; i < rbio->nr_data; i++) {
@@ -2150,7 +2150,7 @@ int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
if (rbio->faila == -1) {
btrfs_warn(fs_info,
"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
- __func__, (u64)bio->bi_iter.bi_sector << 9,
+ __func__, bio->bi_iter.bi_sector << 9,
(u64)bio->bi_iter.bi_size, bbio->map_type);
if (generic_io)
btrfs_put_bbio(bbio);
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index d9a166eb344e..20fd4aa48a8c 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -52,6 +52,7 @@ struct reada_extctl {
struct reada_extent {
u64 logical;
+ u64 owner_root;
struct btrfs_key top;
struct list_head extctl;
int refcnt;
@@ -59,6 +60,7 @@ struct reada_extent {
struct reada_zone *zones[BTRFS_MAX_MIRRORS];
int nzones;
int scheduled;
+ int level;
};
struct reada_zone {
@@ -87,7 +89,8 @@ static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);
static int reada_add_block(struct reada_control *rc, u64 logical,
- struct btrfs_key *top, u64 generation);
+ struct btrfs_key *top, u64 owner_root,
+ u64 generation, int level);
/* recurses */
/* in case of err, eb might be NULL */
@@ -165,7 +168,9 @@ static void __readahead_hook(struct btrfs_fs_info *fs_info,
if (rec->generation == generation &&
btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
- reada_add_block(rc, bytenr, &next_key, n_gen);
+ reada_add_block(rc, bytenr, &next_key,
+ btrfs_header_owner(eb), n_gen,
+ btrfs_header_level(eb) - 1);
}
}
@@ -298,7 +303,8 @@ static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
u64 logical,
- struct btrfs_key *top)
+ struct btrfs_key *top,
+ u64 owner_root, int level)
{
int ret;
struct reada_extent *re = NULL;
@@ -331,6 +337,8 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
INIT_LIST_HEAD(&re->extctl);
spin_lock_init(&re->lock);
re->refcnt = 1;
+ re->owner_root = owner_root;
+ re->level = level;
/*
* map block
@@ -531,6 +539,8 @@ static void reada_zone_release(struct kref *kref)
{
struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);
+ lockdep_assert_held(&zone->device->fs_info->reada_lock);
+
radix_tree_delete(&zone->device->reada_zones,
zone->end >> PAGE_SHIFT);
@@ -546,14 +556,15 @@ static void reada_control_release(struct kref *kref)
}
static int reada_add_block(struct reada_control *rc, u64 logical,
- struct btrfs_key *top, u64 generation)
+ struct btrfs_key *top, u64 owner_root,
+ u64 generation, int level)
{
struct btrfs_fs_info *fs_info = rc->fs_info;
struct reada_extent *re;
struct reada_extctl *rec;
/* takes one ref */
- re = reada_find_extent(fs_info, logical, top);
+ re = reada_find_extent(fs_info, logical, top, owner_root, level);
if (!re)
return -1;
@@ -645,12 +656,13 @@ static int reada_pick_zone(struct btrfs_device *dev)
}
static int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
- int mirror_num, struct extent_buffer **eb)
+ u64 owner_root, int level, int mirror_num,
+ struct extent_buffer **eb)
{
struct extent_buffer *buf = NULL;
int ret;
- buf = btrfs_find_create_tree_block(fs_info, bytenr);
+ buf = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
if (IS_ERR(buf))
return 0;
@@ -738,7 +750,8 @@ static int reada_start_machine_dev(struct btrfs_device *dev)
logical = re->logical;
atomic_inc(&dev->reada_in_flight);
- ret = reada_tree_block_flagged(fs_info, logical, mirror_num, &eb);
+ ret = reada_tree_block_flagged(fs_info, logical, re->owner_root,
+ re->level, mirror_num, &eb);
if (ret)
__readahead_hook(fs_info, re, NULL, ret);
else if (eb)
@@ -945,6 +958,7 @@ struct reada_control *btrfs_reada_add(struct btrfs_root *root,
u64 start;
u64 generation;
int ret;
+ int level;
struct extent_buffer *node;
static struct btrfs_key max_key = {
.objectid = (u64)-1,
@@ -967,9 +981,11 @@ struct reada_control *btrfs_reada_add(struct btrfs_root *root,
node = btrfs_root_node(root);
start = node->start;
generation = btrfs_header_generation(node);
+ level = btrfs_header_level(node);
free_extent_buffer(node);
- ret = reada_add_block(rc, start, &max_key, generation);
+ ret = reada_add_block(rc, start, &max_key, root->root_key.objectid,
+ generation, level);
if (ret) {
kfree(rc);
return ERR_PTR(ret);
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index 78693d3dd15b..4b9b6c52a83b 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -551,34 +551,19 @@ static int process_leaf(struct btrfs_root *root,
static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
int level, u64 *bytenr, u64 *num_bytes)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *eb;
- u64 block_bytenr, gen;
int ret = 0;
while (level >= 0) {
if (level) {
- struct btrfs_key first_key;
-
- block_bytenr = btrfs_node_blockptr(path->nodes[level],
- path->slots[level]);
- gen = btrfs_node_ptr_generation(path->nodes[level],
- path->slots[level]);
- btrfs_node_key_to_cpu(path->nodes[level], &first_key,
- path->slots[level]);
- eb = read_tree_block(fs_info, block_bytenr, gen,
- level - 1, &first_key);
+ eb = btrfs_read_node_slot(path->nodes[level],
+ path->slots[level]);
if (IS_ERR(eb))
return PTR_ERR(eb);
- if (!extent_buffer_uptodate(eb)) {
- free_extent_buffer(eb);
- return -EIO;
- }
btrfs_tree_read_lock(eb);
- btrfs_set_lock_blocking_read(eb);
path->nodes[level-1] = eb;
path->slots[level-1] = 0;
- path->locks[level-1] = BTRFS_READ_LOCK_BLOCKING;
+ path->locks[level-1] = BTRFS_READ_LOCK;
} else {
ret = process_leaf(root, path, bytenr, num_bytes);
if (ret)
@@ -799,8 +784,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
if (!be) {
btrfs_err(fs_info,
"trying to do action %d to bytenr %llu num_bytes %llu but there is no existing entry!",
- action, (unsigned long long)bytenr,
- (unsigned long long)num_bytes);
+ action, bytenr, num_bytes);
dump_ref_action(fs_info, ra);
kfree(ref);
kfree(ra);
@@ -1001,11 +985,10 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
return -ENOMEM;
eb = btrfs_read_lock_root_node(fs_info->extent_root);
- btrfs_set_lock_blocking_read(eb);
level = btrfs_header_level(eb);
path->nodes[level] = eb;
path->slots[level] = 0;
- path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
+ path->locks[level] = BTRFS_READ_LOCK;
while (1) {
/*
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index 99aa87c08912..ab80896315be 100644
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -31,10 +31,10 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
endoff = destoff + olen;
if (endoff > inode->i_size) {
i_size_write(inode, endoff);
- btrfs_inode_safe_disk_i_size_write(inode, 0);
+ btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
}
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
if (ret) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
@@ -163,6 +163,7 @@ static int clone_copy_inline_extent(struct inode *dst,
const u64 aligned_end = ALIGN(new_key->offset + datal,
fs_info->sectorsize);
struct btrfs_trans_handle *trans = NULL;
+ struct btrfs_drop_extents_args drop_args = { 0 };
int ret;
struct btrfs_key key;
@@ -252,7 +253,11 @@ copy_inline_extent:
trans = NULL;
goto out;
}
- ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
+ drop_args.path = path;
+ drop_args.start = drop_start;
+ drop_args.end = aligned_end;
+ drop_args.drop_cache = true;
+ ret = btrfs_drop_extents(trans, root, BTRFS_I(dst), &drop_args);
if (ret)
goto out;
ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
@@ -263,7 +268,7 @@ copy_inline_extent:
btrfs_item_ptr_offset(path->nodes[0],
path->slots[0]),
size);
- inode_add_bytes(dst, datal);
+ btrfs_update_inode_bytes(BTRFS_I(dst), datal, drop_args.bytes_found);
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(dst)->runtime_flags);
ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
out:
@@ -347,7 +352,6 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
u64 drop_start;
/* Note the key will change type as we walk through the tree */
- path->leave_spinning = 1;
ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
0, 0);
if (ret < 0)
@@ -417,7 +421,6 @@ process_slot:
size);
btrfs_release_path(path);
- path->leave_spinning = 0;
memcpy(&new_key, &key, sizeof(new_key));
new_key.objectid = btrfs_ino(BTRFS_I(inode));
@@ -533,7 +536,6 @@ process_slot:
* mixing buffered and direct IO writes against this file.
*/
btrfs_release_path(path);
- path->leave_spinning = 0;
ret = btrfs_replace_file_extents(inode, path, last_dest_end,
destoff + len - 1, NULL, &trans);
@@ -652,7 +654,7 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
if (destoff > inode->i_size) {
const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);
- ret = btrfs_cont_expand(inode, inode->i_size, destoff);
+ ret = btrfs_cont_expand(BTRFS_I(inode), inode->i_size, destoff);
if (ret)
return ret;
/*
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 9ba92d86da0b..19b7db8b2117 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -18,7 +18,6 @@
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
-#include "inode-map.h"
#include "qgroup.h"
#include "print-tree.h"
#include "delalloc-space.h"
@@ -783,7 +782,7 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
btrfs_set_root_refs(root_item, 0);
memset(&root_item->drop_progress, 0,
sizeof(struct btrfs_disk_key));
- root_item->drop_level = 0;
+ btrfs_set_root_drop_level(root_item, 0);
}
btrfs_tree_unlock(eb);
@@ -1196,7 +1195,6 @@ again:
btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
eb = btrfs_lock_root_node(dest);
- btrfs_set_lock_blocking_write(eb);
level = btrfs_header_level(eb);
if (level < lowest_level) {
@@ -1210,7 +1208,6 @@ again:
BTRFS_NESTING_COW);
BUG_ON(ret);
}
- btrfs_set_lock_blocking_write(eb);
if (next_key) {
next_key->objectid = (u64)-1;
@@ -1220,8 +1217,6 @@ again:
parent = eb;
while (1) {
- struct btrfs_key first_key;
-
level = btrfs_header_level(parent);
BUG_ON(level < lowest_level);
@@ -1237,7 +1232,6 @@ again:
old_bytenr = btrfs_node_blockptr(parent, slot);
blocksize = fs_info->nodesize;
old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
- btrfs_node_key_to_cpu(parent, &first_key, slot);
if (level <= max_level) {
eb = path->nodes[level];
@@ -1262,15 +1256,10 @@ again:
break;
}
- eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen,
- level - 1, &first_key);
+ eb = btrfs_read_node_slot(parent, slot);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
break;
- } else if (!extent_buffer_uptodate(eb)) {
- ret = -EIO;
- free_extent_buffer(eb);
- break;
}
btrfs_tree_lock(eb);
if (cow) {
@@ -1279,7 +1268,6 @@ again:
BTRFS_NESTING_COW);
BUG_ON(ret);
}
- btrfs_set_lock_blocking_write(eb);
btrfs_tree_unlock(parent);
free_extent_buffer(parent);
@@ -1418,10 +1406,8 @@ static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
int *level)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *eb = NULL;
int i;
- u64 bytenr;
u64 ptr_gen = 0;
u64 last_snapshot;
u32 nritems;
@@ -1429,8 +1415,6 @@ int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
last_snapshot = btrfs_root_last_snapshot(&root->root_item);
for (i = *level; i > 0; i--) {
- struct btrfs_key first_key;
-
eb = path->nodes[i];
nritems = btrfs_header_nritems(eb);
while (path->slots[i] < nritems) {
@@ -1450,16 +1434,9 @@ int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
return 0;
}
- bytenr = btrfs_node_blockptr(eb, path->slots[i]);
- btrfs_node_key_to_cpu(eb, &first_key, path->slots[i]);
- eb = read_tree_block(fs_info, bytenr, ptr_gen, i - 1,
- &first_key);
- if (IS_ERR(eb)) {
+ eb = btrfs_read_node_slot(eb, path->slots[i]);
+ if (IS_ERR(eb))
return PTR_ERR(eb);
- } else if (!extent_buffer_uptodate(eb)) {
- free_extent_buffer(eb);
- return -EIO;
- }
BUG_ON(btrfs_header_level(eb) != i - 1);
path->nodes[i - 1] = eb;
path->slots[i - 1] = 0;
@@ -1575,7 +1552,7 @@ static void insert_dirty_subvol(struct btrfs_trans_handle *trans,
reloc_root_item = &reloc_root->root_item;
memset(&reloc_root_item->drop_progress, 0,
sizeof(reloc_root_item->drop_progress));
- reloc_root_item->drop_level = 0;
+ btrfs_set_root_drop_level(reloc_root_item, 0);
btrfs_set_root_refs(reloc_root_item, 0);
btrfs_update_reloc_root(trans, root);
@@ -1652,8 +1629,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
int level;
int max_level;
int replaced = 0;
- int ret;
- int err = 0;
+ int ret = 0;
u32 min_reserved;
path = btrfs_alloc_path();
@@ -1672,7 +1648,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
} else {
btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
- level = root_item->drop_level;
+ level = btrfs_root_drop_level(root_item);
BUG_ON(level == 0);
path->lowest_level = level;
ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
@@ -1704,13 +1680,11 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
while (1) {
ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
BTRFS_RESERVE_FLUSH_LIMIT);
- if (ret) {
- err = ret;
+ if (ret)
goto out;
- }
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ ret = PTR_ERR(trans);
trans = NULL;
goto out;
}
@@ -1732,10 +1706,8 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
max_level = level;
ret = walk_down_reloc_tree(reloc_root, path, &level);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto out;
- }
if (ret > 0)
break;
@@ -1746,11 +1718,8 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
ret = replace_path(trans, rc, root, reloc_root, path,
&next_key, level, max_level);
}
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto out;
- }
-
if (ret > 0) {
level = ret;
btrfs_node_key_to_cpu(path->nodes[level], &key,
@@ -1769,7 +1738,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
*/
btrfs_node_key(path->nodes[level], &root_item->drop_progress,
path->slots[level]);
- root_item->drop_level = level;
+ btrfs_set_root_drop_level(root_item, level);
btrfs_end_transaction_throttle(trans);
trans = NULL;
@@ -1789,12 +1758,10 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
BTRFS_NESTING_COW);
btrfs_tree_unlock(leaf);
free_extent_buffer(leaf);
- if (ret < 0)
- err = ret;
out:
btrfs_free_path(path);
- if (err == 0)
+ if (ret == 0)
insert_dirty_subvol(trans, rc, root);
if (trans)
@@ -1805,7 +1772,7 @@ out:
if (replaced && rc->stage == UPDATE_DATA_PTRS)
invalidate_extent_cache(root, &key, &next_key);
- return err;
+ return ret;
}
static noinline_for_stack
@@ -2205,7 +2172,6 @@ static int do_relocation(struct btrfs_trans_handle *trans,
struct btrfs_key *key,
struct btrfs_path *path, int lowest)
{
- struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct btrfs_backref_node *upper;
struct btrfs_backref_edge *edge;
struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
@@ -2213,17 +2179,14 @@ static int do_relocation(struct btrfs_trans_handle *trans,
struct extent_buffer *eb;
u32 blocksize;
u64 bytenr;
- u64 generation;
int slot;
- int ret;
- int err = 0;
+ int ret = 0;
BUG_ON(lowest && node->eb);
path->lowest_level = node->level + 1;
rc->backref_cache.path[node->level] = node;
list_for_each_entry(edge, &node->upper, list[LOWER]) {
- struct btrfs_key first_key;
struct btrfs_ref ref = { 0 };
cond_resched();
@@ -2235,10 +2198,8 @@ static int do_relocation(struct btrfs_trans_handle *trans,
if (upper->eb && !upper->locked) {
if (!lowest) {
ret = btrfs_bin_search(upper->eb, key, &slot);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto next;
- }
BUG_ON(ret);
bytenr = btrfs_node_blockptr(upper->eb, slot);
if (node->eb->start == bytenr)
@@ -2250,10 +2211,8 @@ static int do_relocation(struct btrfs_trans_handle *trans,
if (!upper->eb) {
ret = btrfs_search_slot(trans, root, key, path, 0, 1);
if (ret) {
- if (ret < 0)
- err = ret;
- else
- err = -ENOENT;
+ if (ret > 0)
+ ret = -ENOENT;
btrfs_release_path(path);
break;
@@ -2273,10 +2232,8 @@ static int do_relocation(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
} else {
ret = btrfs_bin_search(upper->eb, key, &slot);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto next;
- }
BUG_ON(ret);
}
@@ -2287,7 +2244,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
"lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
bytenr, node->bytenr, slot,
upper->eb->start);
- err = -EIO;
+ ret = -EIO;
goto next;
}
} else {
@@ -2296,30 +2253,20 @@ static int do_relocation(struct btrfs_trans_handle *trans,
}
blocksize = root->fs_info->nodesize;
- generation = btrfs_node_ptr_generation(upper->eb, slot);
- btrfs_node_key_to_cpu(upper->eb, &first_key, slot);
- eb = read_tree_block(fs_info, bytenr, generation,
- upper->level - 1, &first_key);
+ eb = btrfs_read_node_slot(upper->eb, slot);
if (IS_ERR(eb)) {
- err = PTR_ERR(eb);
- goto next;
- } else if (!extent_buffer_uptodate(eb)) {
- free_extent_buffer(eb);
- err = -EIO;
+ ret = PTR_ERR(eb);
goto next;
}
btrfs_tree_lock(eb);
- btrfs_set_lock_blocking_write(eb);
if (!node->eb) {
ret = btrfs_cow_block(trans, root, eb, upper->eb,
slot, &eb, BTRFS_NESTING_COW);
btrfs_tree_unlock(eb);
free_extent_buffer(eb);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto next;
- }
BUG_ON(node->eb != eb);
} else {
btrfs_set_node_blockptr(upper->eb, slot,
@@ -2345,19 +2292,19 @@ next:
btrfs_backref_drop_node_buffer(upper);
else
btrfs_backref_unlock_node_buffer(upper);
- if (err)
+ if (ret)
break;
}
- if (!err && node->pending) {
+ if (!ret && node->pending) {
btrfs_backref_drop_node_buffer(node);
list_move_tail(&node->list, &rc->backref_cache.changed);
node->pending = 0;
}
path->lowest_level = 0;
- BUG_ON(err == -ENOSPC);
- return err;
+ BUG_ON(ret == -ENOSPC);
+ return ret;
}
static int link_to_upper(struct btrfs_trans_handle *trans,
@@ -2446,7 +2393,7 @@ static int get_tree_block_key(struct btrfs_fs_info *fs_info,
{
struct extent_buffer *eb;
- eb = read_tree_block(fs_info, block->bytenr, block->key.offset,
+ eb = read_tree_block(fs_info, block->bytenr, 0, block->key.offset,
block->level, NULL);
if (IS_ERR(eb)) {
return PTR_ERR(eb);
@@ -2546,7 +2493,8 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
/* Kick in readahead for tree blocks with missing keys */
rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
if (!block->key_ready)
- readahead_tree_block(fs_info, block->bytenr);
+ btrfs_readahead_tree_block(fs_info, block->bytenr, 0, 0,
+ block->level);
}
/* Get first keys */
@@ -3071,7 +3019,7 @@ int add_data_references(struct reloc_control *rc,
while ((ref_node = ulist_next(leaves, &leaf_uiter))) {
struct extent_buffer *eb;
- eb = read_tree_block(fs_info, ref_node->val, 0, 0, NULL);
+ eb = read_tree_block(fs_info, ref_node->val, 0, 0, 0, NULL);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
break;
@@ -3694,7 +3642,7 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
memset(&root->root_item.drop_progress, 0,
sizeof(root->root_item.drop_progress));
- root->root_item.drop_level = 0;
+ btrfs_set_root_drop_level(&root->root_item, 0);
btrfs_set_root_refs(&root->root_item, 0);
ret = btrfs_update_root(trans, fs_info->tree_root,
&root->root_key, &root->root_item);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index e71e7586e9eb..5f4f88a4d2c8 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -20,6 +20,7 @@
#include "rcu-string.h"
#include "raid56.h"
#include "block-group.h"
+#include "zoned.h"
/*
* This is only the first step towards a full-featured scrub. It reads all
@@ -71,11 +72,9 @@ struct scrub_page {
u64 physical;
u64 physical_for_dev_replace;
atomic_t refs;
- struct {
- unsigned int mirror_num:8;
- unsigned int have_csum:1;
- unsigned int io_error:1;
- };
+ u8 mirror_num;
+ int have_csum:1;
+ int io_error:1;
u8 csum[BTRFS_CSUM_SIZE];
struct scrub_recover *recover;
@@ -131,7 +130,7 @@ struct scrub_parity {
int nsectors;
- u64 stripe_len;
+ u32 stripe_len;
refcount_t refs;
@@ -161,7 +160,6 @@ struct scrub_ctx {
atomic_t workers_pending;
spinlock_t list_lock;
wait_queue_head_t list_wait;
- u16 csum_size;
struct list_head csum_list;
atomic_t cancel_req;
int readonly;
@@ -235,15 +233,15 @@ static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
struct scrub_page *spage);
-static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
+static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u32 len,
u64 physical, struct btrfs_device *dev, u64 flags,
- u64 gen, int mirror_num, u8 *csum, int force,
+ u64 gen, int mirror_num, u8 *csum,
u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
- u64 extent_logical, u64 extent_len,
+ u64 extent_logical, u32 extent_len,
u64 *extent_physical,
struct btrfs_device **extent_dev,
int *extent_mirror_num);
@@ -256,10 +254,10 @@ static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);
-static inline int scrub_is_page_on_raid56(struct scrub_page *page)
+static inline int scrub_is_page_on_raid56(struct scrub_page *spage)
{
- return page->recover &&
- (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
+ return spage->recover &&
+ (spage->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
@@ -610,7 +608,6 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
atomic_set(&sctx->bios_in_flight, 0);
atomic_set(&sctx->workers_pending, 0);
atomic_set(&sctx->cancel_req, 0);
- sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
spin_lock_init(&sctx->list_lock);
spin_lock_init(&sctx->stat_lock);
@@ -1092,11 +1089,11 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
success = 1;
for (page_num = 0; page_num < sblock_bad->page_count;
page_num++) {
- struct scrub_page *page_bad = sblock_bad->pagev[page_num];
+ struct scrub_page *spage_bad = sblock_bad->pagev[page_num];
struct scrub_block *sblock_other = NULL;
/* skip no-io-error page in scrub */
- if (!page_bad->io_error && !sctx->is_dev_replace)
+ if (!spage_bad->io_error && !sctx->is_dev_replace)
continue;
if (scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
@@ -1108,7 +1105,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
* sblock_for_recheck array to target device.
*/
sblock_other = NULL;
- } else if (page_bad->io_error) {
+ } else if (spage_bad->io_error) {
/* try to find no-io-error page in mirrors */
for (mirror_index = 0;
mirror_index < BTRFS_MAX_MIRRORS &&
@@ -1147,7 +1144,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
sblock_other,
page_num, 0);
if (0 == ret)
- page_bad->io_error = 0;
+ spage_bad->io_error = 0;
else
success = 0;
}
@@ -1325,13 +1322,13 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
for (mirror_index = 0; mirror_index < nmirrors;
mirror_index++) {
struct scrub_block *sblock;
- struct scrub_page *page;
+ struct scrub_page *spage;
sblock = sblocks_for_recheck + mirror_index;
sblock->sctx = sctx;
- page = kzalloc(sizeof(*page), GFP_NOFS);
- if (!page) {
+ spage = kzalloc(sizeof(*spage), GFP_NOFS);
+ if (!spage) {
leave_nomem:
spin_lock(&sctx->stat_lock);
sctx->stat.malloc_errors++;
@@ -1339,17 +1336,17 @@ leave_nomem:
scrub_put_recover(fs_info, recover);
return -ENOMEM;
}
- scrub_page_get(page);
- sblock->pagev[page_index] = page;
- page->sblock = sblock;
- page->flags = flags;
- page->generation = generation;
- page->logical = logical;
- page->have_csum = have_csum;
+ scrub_page_get(spage);
+ sblock->pagev[page_index] = spage;
+ spage->sblock = sblock;
+ spage->flags = flags;
+ spage->generation = generation;
+ spage->logical = logical;
+ spage->have_csum = have_csum;
if (have_csum)
- memcpy(page->csum,
+ memcpy(spage->csum,
original_sblock->pagev[0]->csum,
- sctx->csum_size);
+ sctx->fs_info->csum_size);
scrub_stripe_index_and_offset(logical,
bbio->map_type,
@@ -1360,23 +1357,23 @@ leave_nomem:
mirror_index,
&stripe_index,
&stripe_offset);
- page->physical = bbio->stripes[stripe_index].physical +
+ spage->physical = bbio->stripes[stripe_index].physical +
stripe_offset;
- page->dev = bbio->stripes[stripe_index].dev;
+ spage->dev = bbio->stripes[stripe_index].dev;
BUG_ON(page_index >= original_sblock->page_count);
- page->physical_for_dev_replace =
+ spage->physical_for_dev_replace =
original_sblock->pagev[page_index]->
physical_for_dev_replace;
/* for missing devices, dev->bdev is NULL */
- page->mirror_num = mirror_index + 1;
+ spage->mirror_num = mirror_index + 1;
sblock->page_count++;
- page->page = alloc_page(GFP_NOFS);
- if (!page->page)
+ spage->page = alloc_page(GFP_NOFS);
+ if (!spage->page)
goto leave_nomem;
scrub_get_recover(recover);
- page->recover = recover;
+ spage->recover = recover;
}
scrub_put_recover(fs_info, recover);
length -= sublen;
@@ -1394,19 +1391,19 @@ static void scrub_bio_wait_endio(struct bio *bio)
static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
struct bio *bio,
- struct scrub_page *page)
+ struct scrub_page *spage)
{
DECLARE_COMPLETION_ONSTACK(done);
int ret;
int mirror_num;
- bio->bi_iter.bi_sector = page->logical >> 9;
+ bio->bi_iter.bi_sector = spage->logical >> 9;
bio->bi_private = &done;
bio->bi_end_io = scrub_bio_wait_endio;
- mirror_num = page->sblock->pagev[0]->mirror_num;
- ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
- page->recover->map_length,
+ mirror_num = spage->sblock->pagev[0]->mirror_num;
+ ret = raid56_parity_recover(fs_info, bio, spage->recover->bbio,
+ spage->recover->map_length,
mirror_num, 0);
if (ret)
return ret;
@@ -1431,10 +1428,10 @@ static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
bio_set_dev(bio, first_page->dev->bdev);
for (page_num = 0; page_num < sblock->page_count; page_num++) {
- struct scrub_page *page = sblock->pagev[page_num];
+ struct scrub_page *spage = sblock->pagev[page_num];
- WARN_ON(!page->page);
- bio_add_page(bio, page->page, PAGE_SIZE, 0);
+ WARN_ON(!spage->page);
+ bio_add_page(bio, spage->page, PAGE_SIZE, 0);
}
if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) {
@@ -1475,24 +1472,24 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
for (page_num = 0; page_num < sblock->page_count; page_num++) {
struct bio *bio;
- struct scrub_page *page = sblock->pagev[page_num];
+ struct scrub_page *spage = sblock->pagev[page_num];
- if (page->dev->bdev == NULL) {
- page->io_error = 1;
+ if (spage->dev->bdev == NULL) {
+ spage->io_error = 1;
sblock->no_io_error_seen = 0;
continue;
}
- WARN_ON(!page->page);
+ WARN_ON(!spage->page);
bio = btrfs_io_bio_alloc(1);
- bio_set_dev(bio, page->dev->bdev);
+ bio_set_dev(bio, spage->dev->bdev);
- bio_add_page(bio, page->page, PAGE_SIZE, 0);
- bio->bi_iter.bi_sector = page->physical >> 9;
+ bio_add_page(bio, spage->page, PAGE_SIZE, 0);
+ bio->bi_iter.bi_sector = spage->physical >> 9;
bio->bi_opf = REQ_OP_READ;
if (btrfsic_submit_bio_wait(bio)) {
- page->io_error = 1;
+ spage->io_error = 1;
sblock->no_io_error_seen = 0;
}
@@ -1548,36 +1545,36 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
struct scrub_block *sblock_good,
int page_num, int force_write)
{
- struct scrub_page *page_bad = sblock_bad->pagev[page_num];
- struct scrub_page *page_good = sblock_good->pagev[page_num];
+ struct scrub_page *spage_bad = sblock_bad->pagev[page_num];
+ struct scrub_page *spage_good = sblock_good->pagev[page_num];
struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
- BUG_ON(page_bad->page == NULL);
- BUG_ON(page_good->page == NULL);
+ BUG_ON(spage_bad->page == NULL);
+ BUG_ON(spage_good->page == NULL);
if (force_write || sblock_bad->header_error ||
- sblock_bad->checksum_error || page_bad->io_error) {
+ sblock_bad->checksum_error || spage_bad->io_error) {
struct bio *bio;
int ret;
- if (!page_bad->dev->bdev) {
+ if (!spage_bad->dev->bdev) {
btrfs_warn_rl(fs_info,
"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
return -EIO;
}
bio = btrfs_io_bio_alloc(1);
- bio_set_dev(bio, page_bad->dev->bdev);
- bio->bi_iter.bi_sector = page_bad->physical >> 9;
+ bio_set_dev(bio, spage_bad->dev->bdev);
+ bio->bi_iter.bi_sector = spage_bad->physical >> 9;
bio->bi_opf = REQ_OP_WRITE;
- ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
+ ret = bio_add_page(bio, spage_good->page, PAGE_SIZE, 0);
if (PAGE_SIZE != ret) {
bio_put(bio);
return -EIO;
}
if (btrfsic_submit_bio_wait(bio)) {
- btrfs_dev_stat_inc_and_print(page_bad->dev,
+ btrfs_dev_stat_inc_and_print(spage_bad->dev,
BTRFS_DEV_STAT_WRITE_ERRS);
atomic64_inc(&fs_info->dev_replace.num_write_errors);
bio_put(bio);
@@ -1798,11 +1795,15 @@ static int scrub_checksum_data(struct scrub_block *sblock)
shash->tfm = fs_info->csum_shash;
crypto_shash_init(shash);
- crypto_shash_digest(shash, kaddr, PAGE_SIZE, csum);
- if (memcmp(csum, spage->csum, sctx->csum_size))
- sblock->checksum_error = 1;
+ /*
+ * In scrub_pages() and scrub_pages_for_parity() we ensure each spage
+ * only contains one sector of data.
+ */
+ crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
+ if (memcmp(csum, spage->csum, fs_info->csum_size))
+ sblock->checksum_error = 1;
return sblock->checksum_error;
}
@@ -1814,16 +1815,26 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
u8 calculated_csum[BTRFS_CSUM_SIZE];
u8 on_disk_csum[BTRFS_CSUM_SIZE];
- const int num_pages = sctx->fs_info->nodesize >> PAGE_SHIFT;
+ /*
+ * This is done in sectorsize steps even for metadata as there's a
+ * constraint for nodesize to be aligned to sectorsize. This will need
+ * to change so we don't misuse data and metadata units like that.
+ */
+ const u32 sectorsize = sctx->fs_info->sectorsize;
+ const int num_sectors = fs_info->nodesize >> fs_info->sectorsize_bits;
int i;
struct scrub_page *spage;
char *kaddr;
BUG_ON(sblock->page_count < 1);
+
+ /* Each member in pagev is just one block, not a full page */
+ ASSERT(sblock->page_count == num_sectors);
+
spage = sblock->pagev[0];
kaddr = page_address(spage->page);
h = (struct btrfs_header *)kaddr;
- memcpy(on_disk_csum, h->csum, sctx->csum_size);
+ memcpy(on_disk_csum, h->csum, sctx->fs_info->csum_size);
/*
* we don't use the getter functions here, as we
@@ -1848,15 +1859,15 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
shash->tfm = fs_info->csum_shash;
crypto_shash_init(shash);
crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
- PAGE_SIZE - BTRFS_CSUM_SIZE);
+ sectorsize - BTRFS_CSUM_SIZE);
- for (i = 1; i < num_pages; i++) {
+ for (i = 1; i < num_sectors; i++) {
kaddr = page_address(sblock->pagev[i]->page);
- crypto_shash_update(shash, kaddr, PAGE_SIZE);
+ crypto_shash_update(shash, kaddr, sectorsize);
}
crypto_shash_final(shash, calculated_csum);
- if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
+ if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size))
sblock->checksum_error = 1;
return sblock->header_error || sblock->checksum_error;
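A worked example of the sizing above, assuming nodesize 16KiB and sectorsize 4KiB: sectorsize_bits is 12, so num_sectors = 16384 >> 12 = 4, and the ASSERT expects exactly four pagev entries because each one now carries a single sector. The digest then covers sectorsize - BTRFS_CSUM_SIZE bytes of the first sector (the on-disk csum itself is skipped) plus a full sector for each of the remaining three.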
@@ -1893,7 +1904,7 @@ static int scrub_checksum_super(struct scrub_block *sblock)
crypto_shash_digest(shash, kaddr + BTRFS_CSUM_SIZE,
BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, calculated_csum);
- if (memcmp(calculated_csum, s->csum, sctx->csum_size))
+ if (memcmp(calculated_csum, s->csum, sctx->fs_info->csum_size))
++fail_cor;
if (fail_cor + fail_gen) {
@@ -2150,12 +2161,13 @@ bbio_out:
spin_unlock(&sctx->stat_lock);
}
-static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
+static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u32 len,
u64 physical, struct btrfs_device *dev, u64 flags,
- u64 gen, int mirror_num, u8 *csum, int force,
+ u64 gen, int mirror_num, u8 *csum,
u64 physical_for_dev_replace)
{
struct scrub_block *sblock;
+ const u32 sectorsize = sctx->fs_info->sectorsize;
int index;
sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
@@ -2174,7 +2186,12 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
for (index = 0; len > 0; index++) {
struct scrub_page *spage;
- u64 l = min_t(u64, len, PAGE_SIZE);
+ /*
+ * Here we will allocate one page for one sector to scrub.
+ * This is fine if PAGE_SIZE == sectorsize, but will cost
+ * more memory in the PAGE_SIZE > sectorsize case.
+ */
+ u32 l = min(sectorsize, len);
spage = kzalloc(sizeof(*spage), GFP_KERNEL);
if (!spage) {
@@ -2198,7 +2215,7 @@ leave_nomem:
spage->mirror_num = mirror_num;
if (csum) {
spage->have_csum = 1;
- memcpy(spage->csum, csum, sctx->csum_size);
+ memcpy(spage->csum, csum, sctx->fs_info->csum_size);
} else {
spage->have_csum = 0;
}
@@ -2231,7 +2248,7 @@ leave_nomem:
}
}
- if (force)
+ if (flags & BTRFS_EXTENT_FLAG_SUPER)
scrub_submit(sctx);
}
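A note on dropping the force parameter: as the scrub_supers() hunk later in this patch shows, superblocks were the only caller passing force=1, so keying the immediate scrub_submit() off BTRFS_EXTENT_FLAG_SUPER keeps the behavior while removing the argument.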
@@ -2295,12 +2312,11 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
unsigned long *bitmap,
- u64 start, u64 len)
+ u64 start, u32 len)
{
u64 offset;
- u64 nsectors64;
u32 nsectors;
- int sectorsize = sparity->sctx->fs_info->sectorsize;
+ u32 sectorsize_bits = sparity->sctx->fs_info->sectorsize_bits;
if (len >= sparity->stripe_len) {
bitmap_set(bitmap, 0, sparity->nsectors);
@@ -2309,11 +2325,8 @@ static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
start -= sparity->logic_start;
start = div64_u64_rem(start, sparity->stripe_len, &offset);
- offset = div_u64(offset, sectorsize);
- nsectors64 = div_u64(len, sectorsize);
-
- ASSERT(nsectors64 < UINT_MAX);
- nsectors = (u32)nsectors64;
+ offset = offset >> sectorsize_bits;
+ nsectors = len >> sectorsize_bits;
if (offset + nsectors <= sparity->nsectors) {
bitmap_set(bitmap, offset, nsectors);
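The shifts above are equivalent to the removed div_u64() calls because sectorsize is a power of two: for example with sectorsize 4096, sectorsize_bits is 12 and len >> 12 == div_u64(len, 4096), so the hot path no longer needs 64-bit division at all.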
@@ -2325,13 +2338,13 @@ static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
}
static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
- u64 start, u64 len)
+ u64 start, u32 len)
{
__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
}
static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
- u64 start, u64 len)
+ u64 start, u32 len)
{
__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
}
@@ -2359,48 +2372,77 @@ static void scrub_block_complete(struct scrub_block *sblock)
u64 end = sblock->pagev[sblock->page_count - 1]->logical +
PAGE_SIZE;
+ ASSERT(end - start <= U32_MAX);
scrub_parity_mark_sectors_error(sblock->sparity,
start, end - start);
}
}
+static void drop_csum_range(struct scrub_ctx *sctx, struct btrfs_ordered_sum *sum)
+{
+ sctx->stat.csum_discards += sum->len >> sctx->fs_info->sectorsize_bits;
+ list_del(&sum->list);
+ kfree(sum);
+}
+
+/*
+ * Find the desired csum for range [logical, logical + sectorsize), and store
+ * the csum into @csum.
+ *
+ * The search source is sctx->csum_list, which is a pre-populated list
+ * storing bytenr-ordered csum ranges. We're responsible for cleaning up any
+ * range that is before @logical.
+ *
+ * Return 0 if there is no csum for the range.
+ * Return 1 if there is a csum for the range, copied into @csum.
+ */
static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
{
- struct btrfs_ordered_sum *sum = NULL;
- unsigned long index;
- unsigned long num_sectors;
+ bool found = false;
while (!list_empty(&sctx->csum_list)) {
+ struct btrfs_ordered_sum *sum = NULL;
+ unsigned long index;
+ unsigned long num_sectors;
+
sum = list_first_entry(&sctx->csum_list,
struct btrfs_ordered_sum, list);
+ /* The current csum range is beyond our range, no csum found */
if (sum->bytenr > logical)
- return 0;
- if (sum->bytenr + sum->len > logical)
break;
- ++sctx->stat.csum_discards;
- list_del(&sum->list);
- kfree(sum);
- sum = NULL;
- }
- if (!sum)
- return 0;
+ /*
+ * The current sum is before our bytenr; since scrub is always
+ * done in bytenr order, the csum will never be used again.
+ * Clean it up so that later calls won't bother with the range,
+ * and continue searching the next range.
+ */
+ if (sum->bytenr + sum->len <= logical) {
+ drop_csum_range(sctx, sum);
+ continue;
+ }
- index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
- ASSERT(index < UINT_MAX);
+ /* Now the csum range covers our bytenr, copy the csum */
+ found = true;
+ index = (logical - sum->bytenr) >> sctx->fs_info->sectorsize_bits;
+ num_sectors = sum->len >> sctx->fs_info->sectorsize_bits;
- num_sectors = sum->len / sctx->fs_info->sectorsize;
- memcpy(csum, sum->sums + index * sctx->csum_size, sctx->csum_size);
- if (index == num_sectors - 1) {
- list_del(&sum->list);
- kfree(sum);
+ memcpy(csum, sum->sums + index * sctx->fs_info->csum_size,
+ sctx->fs_info->csum_size);
+
+ /* Cleanup the range if we're at the end of the csum range */
+ if (index == num_sectors - 1)
+ drop_csum_range(sctx, sum);
+ break;
}
+ if (!found)
+ return 0;
return 1;
}
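An aside for readers following the new lookup: the pattern is a sorted range list that gets pruned as the query cursor advances. A minimal standalone sketch of the same idea, in plain userspace C with made-up names (this is not btrfs API):

#include <stdint.h>
#include <stdlib.h>

struct range {
	uint64_t start;
	uint64_t len;
	struct range *next;	/* singly linked, sorted by start */
};

/*
 * Find the range covering @key. Because queries arrive in increasing
 * key order (as scrub queries do, in bytenr order), entries wholly
 * before @key can never match again and are freed on the way.
 */
static struct range *find_covering(struct range **head, uint64_t key)
{
	while (*head) {
		struct range *r = *head;

		/* First entry already past @key: nothing covers it. */
		if (r->start > key)
			return NULL;
		/* Wholly before @key: prune and keep going. */
		if (r->start + r->len <= key) {
			*head = r->next;
			free(r);
			continue;
		}
		/* Here r->start <= key < r->start + r->len. */
		return r;
	}
	return NULL;
}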
/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
- u64 logical, u64 len,
+ u64 logical, u32 len,
u64 physical, struct btrfs_device *dev, u64 flags,
u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
@@ -2432,7 +2474,7 @@ static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
}
while (len) {
- u64 l = min_t(u64, len, blocksize);
+ u32 l = min(len, blocksize);
int have_csum = 0;
if (flags & BTRFS_EXTENT_FLAG_DATA) {
@@ -2442,7 +2484,7 @@ static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
++sctx->stat.no_csum;
}
ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
- mirror_num, have_csum ? csum : NULL, 0,
+ mirror_num, have_csum ? csum : NULL,
physical_for_dev_replace);
if (ret)
return ret;
@@ -2455,14 +2497,17 @@ static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
}
static int scrub_pages_for_parity(struct scrub_parity *sparity,
- u64 logical, u64 len,
+ u64 logical, u32 len,
u64 physical, struct btrfs_device *dev,
u64 flags, u64 gen, int mirror_num, u8 *csum)
{
struct scrub_ctx *sctx = sparity->sctx;
struct scrub_block *sblock;
+ const u32 sectorsize = sctx->fs_info->sectorsize;
int index;
+ ASSERT(IS_ALIGNED(len, sectorsize));
+
sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
if (!sblock) {
spin_lock(&sctx->stat_lock);
@@ -2481,7 +2526,6 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
for (index = 0; len > 0; index++) {
struct scrub_page *spage;
- u64 l = min_t(u64, len, PAGE_SIZE);
spage = kzalloc(sizeof(*spage), GFP_KERNEL);
if (!spage) {
@@ -2508,7 +2552,7 @@ leave_nomem:
spage->mirror_num = mirror_num;
if (csum) {
spage->have_csum = 1;
- memcpy(spage->csum, csum, sctx->csum_size);
+ memcpy(spage->csum, csum, sctx->fs_info->csum_size);
} else {
spage->have_csum = 0;
}
@@ -2516,9 +2560,12 @@ leave_nomem:
spage->page = alloc_page(GFP_KERNEL);
if (!spage->page)
goto leave_nomem;
- len -= l;
- logical += l;
- physical += l;
+
+ /* Iterate over the stripe range in sectorsize steps */
+ len -= sectorsize;
+ logical += sectorsize;
+ physical += sectorsize;
}
WARN_ON(sblock->page_count == 0);
@@ -2539,7 +2586,7 @@ leave_nomem:
}
static int scrub_extent_for_parity(struct scrub_parity *sparity,
- u64 logical, u64 len,
+ u64 logical, u32 len,
u64 physical, struct btrfs_device *dev,
u64 flags, u64 gen, int mirror_num)
{
@@ -2563,7 +2610,7 @@ static int scrub_extent_for_parity(struct scrub_parity *sparity,
}
while (len) {
- u64 l = min_t(u64, len, blocksize);
+ u32 l = min(len, blocksize);
int have_csum = 0;
if (flags & BTRFS_EXTENT_FLAG_DATA) {
@@ -2767,7 +2814,8 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
u64 generation;
u64 extent_logical;
u64 extent_physical;
- u64 extent_len;
+ /* Check the comment in scrub_stripe() for why u32 is enough here */
+ u32 extent_len;
u64 mapped_length;
struct btrfs_device *extent_dev;
struct scrub_parity *sparity;
@@ -2776,7 +2824,8 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
int extent_mirror_num;
int stop_loop = 0;
- nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
+ ASSERT(map->stripe_len <= U32_MAX);
+ nsectors = map->stripe_len >> fs_info->sectorsize_bits;
bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
GFP_NOFS);
@@ -2787,6 +2836,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
return -ENOMEM;
}
+ ASSERT(map->stripe_len <= U32_MAX);
sparity->stripe_len = map->stripe_len;
sparity->nsectors = nsectors;
sparity->sctx = sctx;
@@ -2881,6 +2931,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
}
again:
extent_logical = key.objectid;
+ ASSERT(bytes <= U32_MAX);
extent_len = bytes;
if (extent_logical < logic_start) {
@@ -2959,9 +3010,11 @@ next:
logic_start += map->stripe_len;
}
out:
- if (ret < 0)
+ if (ret < 0) {
+ ASSERT(logic_end - logic_start <= U32_MAX);
scrub_parity_mark_sectors_error(sparity, logic_start,
logic_end - logic_start);
+ }
scrub_parity_put(sparity);
scrub_submit(sctx);
mutex_lock(&sctx->wr_lock);
@@ -3003,7 +3056,11 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
u64 offset;
u64 extent_logical;
u64 extent_physical;
- u64 extent_len;
+ /*
+ * Unlike chunk length, extent length should never go beyond
+ * BTRFS_MAX_EXTENT_SIZE, thus u32 is enough here.
+ */
+ u32 extent_len;
u64 stripe_logical;
u64 stripe_end;
struct btrfs_device *extent_dev;
@@ -3084,17 +3141,21 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
key_end.offset = (u64)-1;
reada1 = btrfs_reada_add(root, &key, &key_end);
- key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
- key.type = BTRFS_EXTENT_CSUM_KEY;
- key.offset = logical;
- key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
- key_end.type = BTRFS_EXTENT_CSUM_KEY;
- key_end.offset = logic_end;
- reada2 = btrfs_reada_add(csum_root, &key, &key_end);
+ if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
+ key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
+ key.type = BTRFS_EXTENT_CSUM_KEY;
+ key.offset = logical;
+ key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
+ key_end.type = BTRFS_EXTENT_CSUM_KEY;
+ key_end.offset = logic_end;
+ reada2 = btrfs_reada_add(csum_root, &key, &key_end);
+ } else {
+ reada2 = NULL;
+ }
if (!IS_ERR(reada1))
btrfs_reada_wait(reada1);
- if (!IS_ERR(reada2))
+ if (!IS_ERR_OR_NULL(reada2))
btrfs_reada_wait(reada2);
@@ -3248,6 +3309,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
again:
extent_logical = key.objectid;
+ ASSERT(bytes <= U32_MAX);
extent_len = bytes;
/*
@@ -3704,10 +3766,12 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
if (bytenr + BTRFS_SUPER_INFO_SIZE >
scrub_dev->commit_total_bytes)
break;
+ if (!btrfs_check_super_location(scrub_dev, bytenr))
+ continue;
ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
- NULL, 1, bytenr);
+ NULL, bytenr);
if (ret)
return ret;
}
@@ -3821,14 +3885,6 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
return -EINVAL;
}
- if (fs_info->sectorsize != PAGE_SIZE) {
- /* not supported for data w/o checksums */
- btrfs_err_rl(fs_info,
- "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
- fs_info->sectorsize, PAGE_SIZE);
- return -EINVAL;
- }
-
if (fs_info->nodesize >
PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
@@ -3855,7 +3911,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
goto out_free_ctx;
mutex_lock(&fs_info->fs_devices->device_list_mutex);
- dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
+ dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);
if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
!is_dev_replace)) {
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
@@ -4032,7 +4088,7 @@ int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
struct scrub_ctx *sctx = NULL;
mutex_lock(&fs_info->fs_devices->device_list_mutex);
- dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
+ dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);
if (dev)
sctx = dev->scrub_ctx;
if (sctx)
@@ -4043,7 +4099,7 @@ int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
}
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
- u64 extent_logical, u64 extent_len,
+ u64 extent_logical, u32 extent_len,
u64 *extent_physical,
struct btrfs_device **extent_dev,
int *extent_mirror_num)
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 340c76a12ce1..d719a2755a40 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -2410,7 +2410,7 @@ static int send_subvol_begin(struct send_ctx *sctx)
sctx->send_root->root_item.uuid);
TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
- le64_to_cpu(sctx->send_root->root_item.ctransid));
+ btrfs_root_ctransid(&sctx->send_root->root_item));
if (parent_root) {
if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
@@ -2419,7 +2419,7 @@ static int send_subvol_begin(struct send_ctx *sctx)
TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
parent_root->root_item.uuid);
TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
- le64_to_cpu(sctx->parent_root->root_item.ctransid));
+ btrfs_root_ctransid(&sctx->parent_root->root_item));
}
ret = send_cmd(sctx);
@@ -5101,7 +5101,7 @@ static int send_clone(struct send_ctx *sctx,
TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
clone_root->root->root_item.uuid);
TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
- le64_to_cpu(clone_root->root->root_item.ctransid));
+ btrfs_root_ctransid(&clone_root->root->root_item));
TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
clone_root->offset);
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index c46be27be700..8260f8bb3ff0 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -57,8 +57,9 @@ u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \
const void *ptr, unsigned long off) \
{ \
const unsigned long member_offset = (unsigned long)ptr + off; \
- const unsigned long idx = member_offset >> PAGE_SHIFT; \
- const unsigned long oip = offset_in_page(member_offset); \
+ const unsigned long idx = get_eb_page_index(member_offset); \
+ const unsigned long oip = get_eb_offset_in_page(token->eb, \
+ member_offset); \
const int size = sizeof(u##bits); \
u8 lebytes[sizeof(u##bits)]; \
const int part = PAGE_SIZE - oip; \
@@ -85,8 +86,8 @@ u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
const void *ptr, unsigned long off) \
{ \
const unsigned long member_offset = (unsigned long)ptr + off; \
- const unsigned long oip = offset_in_page(member_offset); \
- const unsigned long idx = member_offset >> PAGE_SHIFT; \
+ const unsigned long oip = get_eb_offset_in_page(eb, member_offset); \
+ const unsigned long idx = get_eb_page_index(member_offset); \
char *kaddr = page_address(eb->pages[idx]); \
const int size = sizeof(u##bits); \
const int part = PAGE_SIZE - oip; \
@@ -106,8 +107,9 @@ void btrfs_set_token_##bits(struct btrfs_map_token *token, \
u##bits val) \
{ \
const unsigned long member_offset = (unsigned long)ptr + off; \
- const unsigned long idx = member_offset >> PAGE_SHIFT; \
- const unsigned long oip = offset_in_page(member_offset); \
+ const unsigned long idx = get_eb_page_index(member_offset); \
+ const unsigned long oip = get_eb_offset_in_page(token->eb, \
+ member_offset); \
const int size = sizeof(u##bits); \
u8 lebytes[sizeof(u##bits)]; \
const int part = PAGE_SIZE - oip; \
@@ -136,8 +138,8 @@ void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \
unsigned long off, u##bits val) \
{ \
const unsigned long member_offset = (unsigned long)ptr + off; \
- const unsigned long oip = offset_in_page(member_offset); \
- const unsigned long idx = member_offset >> PAGE_SHIFT; \
+ const unsigned long oip = get_eb_offset_in_page(eb, member_offset); \
+ const unsigned long idx = get_eb_page_index(member_offset); \
char *kaddr = page_address(eb->pages[idx]); \
const int size = sizeof(u##bits); \
const int part = PAGE_SIZE - oip; \
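The get_eb_page_index()/get_eb_offset_in_page() helpers these hunks switch to are defined outside this diff. As a reading aid only, a plausible minimal form, assuming a subpage tree block never crosses a page boundary, so the extent buffer's start offset inside its first page must be folded into the byte offset:

static inline unsigned long get_eb_page_index(unsigned long offset)
{
	/* For subpage tree blocks the whole eb fits in page 0. */
	return offset >> PAGE_SHIFT;
}

static inline unsigned long get_eb_offset_in_page(const struct extent_buffer *eb,
						  unsigned long offset)
{
	/*
	 * With sectorsize == PAGE_SIZE, eb->start is page aligned and
	 * adding it changes nothing; with subpage it relocates the
	 * offset to where the block actually sits inside the page.
	 */
	return offset_in_page(offset + eb->start);
}

The authoritative definitions live in the btrfs headers; treat the above as a sketch.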
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 8840a4fa81eb..022f20810089 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -44,6 +44,7 @@
#include "backref.h"
#include "space-info.h"
#include "sysfs.h"
+#include "zoned.h"
#include "tests/btrfs-tests.h"
#include "block-group.h"
#include "discard.h"
@@ -240,9 +241,13 @@ void __cold btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
vaf.fmt = fmt;
vaf.va = &args;
- if (__ratelimit(ratelimit))
- printk("%sBTRFS %s (device %s): %pV\n", lvl, type,
- fs_info ? fs_info->sb->s_id : "<unknown>", &vaf);
+ if (__ratelimit(ratelimit)) {
+ if (fs_info)
+ printk("%sBTRFS %s (device %s): %pV\n", lvl, type,
+ fs_info->sb->s_id, &vaf);
+ else
+ printk("%sBTRFS %s: %pV\n", lvl, type, &vaf);
+ }
va_end(args);
}
@@ -333,7 +338,6 @@ enum {
Opt_device,
Opt_fatal_errors,
Opt_flushoncommit, Opt_noflushoncommit,
- Opt_inode_cache, Opt_noinode_cache,
Opt_max_inline,
Opt_barrier, Opt_nobarrier,
Opt_datacow, Opt_nodatacow,
@@ -360,9 +364,13 @@ enum {
Opt_rescue,
Opt_usebackuproot,
Opt_nologreplay,
+ Opt_ignorebadroots,
+ Opt_ignoredatacsums,
+ Opt_rescue_all,
/* Deprecated options */
Opt_recovery,
+ Opt_inode_cache, Opt_noinode_cache,
/* Debugging options */
Opt_check_integrity,
@@ -455,9 +463,25 @@ static const match_table_t tokens = {
static const match_table_t rescue_tokens = {
{Opt_usebackuproot, "usebackuproot"},
{Opt_nologreplay, "nologreplay"},
+ {Opt_ignorebadroots, "ignorebadroots"},
+ {Opt_ignorebadroots, "ibadroots"},
+ {Opt_ignoredatacsums, "ignoredatacsums"},
+ {Opt_ignoredatacsums, "idatacsums"},
+ {Opt_rescue_all, "all"},
{Opt_err, NULL},
};
+static bool check_ro_option(struct btrfs_fs_info *fs_info, unsigned long opt,
+ const char *opt_name)
+{
+ if (fs_info->mount_opt & opt) {
+ btrfs_err(fs_info, "%s must be used with ro mount option",
+ opt_name);
+ return true;
+ }
+ return false;
+}
+
static int parse_rescue_options(struct btrfs_fs_info *info, const char *options)
{
char *opts;
@@ -487,6 +511,23 @@ static int parse_rescue_options(struct btrfs_fs_info *info, const char *options)
btrfs_set_and_info(info, NOLOGREPLAY,
"disabling log replay at mount time");
break;
+ case Opt_ignorebadroots:
+ btrfs_set_and_info(info, IGNOREBADROOTS,
+ "ignoring bad roots");
+ break;
+ case Opt_ignoredatacsums:
+ btrfs_set_and_info(info, IGNOREDATACSUMS,
+ "ignoring data csums");
+ break;
+ case Opt_rescue_all:
+ btrfs_info(info, "enabling all of the rescue options");
+ btrfs_set_and_info(info, IGNOREDATACSUMS,
+ "ignoring data csums");
+ btrfs_set_and_info(info, IGNOREBADROOTS,
+ "ignoring bad roots");
+ btrfs_set_and_info(info, NOLOGREPLAY,
+ "disabling log replay at mount time");
+ break;
case Opt_err:
btrfs_info(info, "unrecognized rescue option '%s'", p);
ret = -EINVAL;
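For context, a recovery mount using these new options might look like the following (illustrative; the ':' separator matches how btrfs_show_options() prints combined rescue options later in this patch):

	mount -o ro,rescue=ignorebadroots:ignoredatacsums /dev/sdb /mnt

Note that ro is mandatory here; the check_ro_option() helper added above rejects these options on a writable mount.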
@@ -511,7 +552,6 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
{
substring_t args[MAX_OPT_ARGS];
char *p, *num;
- u64 cache_gen;
int intarg;
int ret = 0;
char *compress_type;
@@ -521,11 +561,17 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
bool saved_compress_force;
int no_compress = 0;
- cache_gen = btrfs_super_cache_generation(info->super_copy);
if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
btrfs_set_opt(info->mount_opt, FREE_SPACE_TREE);
- else if (cache_gen)
- btrfs_set_opt(info->mount_opt, SPACE_CACHE);
+ else if (btrfs_free_space_cache_v1_active(info)) {
+ if (btrfs_is_zoned(info)) {
+ btrfs_info(info,
+ "zoned: clearing existing space cache");
+ btrfs_set_super_cache_generation(info->super_copy, 0);
+ } else {
+ btrfs_set_opt(info->mount_opt, SPACE_CACHE);
+ }
+ }
/*
* Even if the options are empty, we still need to do an extra check
@@ -832,14 +878,9 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
}
break;
case Opt_inode_cache:
- btrfs_warn(info,
- "the 'inode_cache' option is deprecated and will have no effect from 5.11");
- btrfs_set_pending_and_info(info, INODE_MAP_CACHE,
- "enabling inode map caching");
- break;
case Opt_noinode_cache:
- btrfs_clear_pending_and_info(info, INODE_MAP_CACHE,
- "disabling inode map caching");
+ btrfs_warn(info,
+ "the 'inode_cache' option is deprecated and has no effect since 5.11");
break;
case Opt_clear_cache:
btrfs_set_and_info(info, CLEAR_CACHE,
@@ -968,14 +1009,14 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
}
}
check:
- /*
- * Extra check for current option against current flag
- */
- if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & SB_RDONLY)) {
- btrfs_err(info,
- "nologreplay must be used with ro mount option");
+ /* We're read-only, don't have to check. */
+ if (new_flags & SB_RDONLY)
+ goto out;
+
+ if (check_ro_option(info, BTRFS_MOUNT_NOLOGREPLAY, "nologreplay") ||
+ check_ro_option(info, BTRFS_MOUNT_IGNOREBADROOTS, "ignorebadroots") ||
+ check_ro_option(info, BTRFS_MOUNT_IGNOREDATACSUMS, "ignoredatacsums"))
ret = -EINVAL;
- }
out:
if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) &&
!btrfs_test_opt(info, FREE_SPACE_TREE) &&
@@ -984,6 +1025,8 @@ out:
ret = -EINVAL;
}
+ if (!ret)
+ ret = btrfs_check_mountopts_zoned(info);
if (!ret && btrfs_test_opt(info, SPACE_CACHE))
btrfs_info(info, "disk space caching is enabled");
if (!ret && btrfs_test_opt(info, FREE_SPACE_TREE))
@@ -1127,7 +1170,6 @@ char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
ret = -ENOMEM;
goto err;
}
- path->leave_spinning = 1;
name = kmalloc(PATH_MAX, GFP_KERNEL);
if (!name) {
@@ -1256,7 +1298,6 @@ static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objec
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- path->leave_spinning = 1;
/*
* Find the "default" dir item which points to the root item that we
@@ -1383,11 +1424,18 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
return btrfs_commit_transaction(trans);
}
+static void print_rescue_option(struct seq_file *seq, const char *s, bool *printed)
+{
+ seq_printf(seq, "%s%s", (*printed) ? ":" : ",rescue=", s);
+ *printed = true;
+}
+
static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
{
struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
const char *compress_type;
const char *subvol_name;
+ bool printed = false;
if (btrfs_test_opt(info, DEGRADED))
seq_puts(seq, ",degraded");
@@ -1420,7 +1468,13 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
if (btrfs_test_opt(info, NOTREELOG))
seq_puts(seq, ",notreelog");
if (btrfs_test_opt(info, NOLOGREPLAY))
- seq_puts(seq, ",rescue=nologreplay");
+ print_rescue_option(seq, "nologreplay", &printed);
+ if (btrfs_test_opt(info, USEBACKUPROOT))
+ print_rescue_option(seq, "usebackuproot", &printed);
+ if (btrfs_test_opt(info, IGNOREBADROOTS))
+ print_rescue_option(seq, "ignorebadroots", &printed);
+ if (btrfs_test_opt(info, IGNOREDATACSUMS))
+ print_rescue_option(seq, "ignoredatacsums", &printed);
if (btrfs_test_opt(info, FLUSHONCOMMIT))
seq_puts(seq, ",flushoncommit");
if (btrfs_test_opt(info, DISCARD_SYNC))
@@ -1429,9 +1483,9 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
seq_puts(seq, ",discard=async");
if (!(info->sb->s_flags & SB_POSIXACL))
seq_puts(seq, ",noacl");
- if (btrfs_test_opt(info, SPACE_CACHE))
+ if (btrfs_free_space_cache_v1_active(info))
seq_puts(seq, ",space_cache");
- else if (btrfs_test_opt(info, FREE_SPACE_TREE))
+ else if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
seq_puts(seq, ",space_cache=v2");
else
seq_puts(seq, ",nospace_cache");
@@ -1445,8 +1499,6 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
seq_puts(seq, ",enospc_debug");
if (btrfs_test_opt(info, AUTO_DEFRAG))
seq_puts(seq, ",autodefrag");
- if (btrfs_test_opt(info, INODE_MAP_CACHE))
- seq_puts(seq, ",inode_cache");
if (btrfs_test_opt(info, SKIP_BALANCE))
seq_puts(seq, ",skip_balance");
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
@@ -1810,6 +1862,8 @@ static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
unsigned long old_opts)
{
+ const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
+
/*
* We need to clean up all defraggable inodes if autodefrag is
* disabled or the filesystem is read-only.
@@ -1826,12 +1880,15 @@ static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
else if (btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) &&
!btrfs_test_opt(fs_info, DISCARD_ASYNC))
btrfs_discard_cleanup(fs_info);
+
+ /* If we toggled space cache */
+ if (cache_opt != btrfs_free_space_cache_v1_active(fs_info))
+ btrfs_set_free_space_cache_v1_active(fs_info, cache_opt);
}
static int btrfs_remount(struct super_block *sb, int *flags, char *data)
{
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
- struct btrfs_root *root = fs_info->tree_root;
unsigned old_flags = sb->s_flags;
unsigned long old_opts = fs_info->mount_opt;
unsigned long old_compress_type = fs_info->compress_type;
@@ -1862,6 +1919,22 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
btrfs_resize_thread_pool(fs_info,
fs_info->thread_pool_size, old_thread_pool_size);
+ if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) !=
+ btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
+ (!sb_rdonly(sb) || (*flags & SB_RDONLY))) {
+ btrfs_warn(fs_info,
+ "remount supports changing free space tree only from ro to rw");
+ /* Make sure free space cache options match the state on disk */
+ if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
+ btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
+ btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
+ }
+ if (btrfs_free_space_cache_v1_active(fs_info)) {
+ btrfs_clear_opt(fs_info->mount_opt, FREE_SPACE_TREE);
+ btrfs_set_opt(fs_info->mount_opt, SPACE_CACHE);
+ }
+ }
+
if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
goto out;
@@ -1924,39 +1997,15 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
goto restore;
}
- ret = btrfs_cleanup_fs_roots(fs_info);
- if (ret)
- goto restore;
-
- /* recover relocation */
- mutex_lock(&fs_info->cleaner_mutex);
- ret = btrfs_recover_relocation(root);
- mutex_unlock(&fs_info->cleaner_mutex);
- if (ret)
- goto restore;
-
- ret = btrfs_resume_balance_async(fs_info);
+ /*
+ * NOTE: when remounting with a change that does writes, don't
+ * put it anywhere above this point, as we are not sure it is
+ * safe to write until we pass the above checks.
+ */
+ ret = btrfs_start_pre_rw_mount(fs_info);
if (ret)
goto restore;
- ret = btrfs_resume_dev_replace_async(fs_info);
- if (ret) {
- btrfs_warn(fs_info, "failed to resume dev_replace");
- goto restore;
- }
-
- btrfs_qgroup_rescan_resume(fs_info);
-
- if (!fs_info->uuid_root) {
- btrfs_info(fs_info, "creating UUID tree");
- ret = btrfs_create_uuid_tree(fs_info);
- if (ret) {
- btrfs_warn(fs_info,
- "failed to create the UUID tree %d",
- ret);
- goto restore;
- }
- }
sb->s_flags &= ~SB_RDONLY;
set_bit(BTRFS_FS_OPEN, &fs_info->flags);
@@ -1970,6 +2019,7 @@ out:
wake_up_process(fs_info->transaction_kthread);
btrfs_remount_cleanup(fs_info, old_opts);
+ btrfs_clear_oneshot_options(fs_info);
clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
return 0;
@@ -2156,7 +2206,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
u64 total_used = 0;
u64 total_free_data = 0;
u64 total_free_meta = 0;
- int bits = dentry->d_sb->s_blocksize_bits;
+ u32 bits = fs_info->sectorsize_bits;
__be32 *fsid = (__be32 *)fs_info->fs_devices->fsid;
unsigned factor = 1;
struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
@@ -2463,6 +2513,11 @@ static void __init btrfs_print_mod_info(void)
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
", ref-verify=on"
#endif
+#ifdef CONFIG_BLK_DEV_ZONED
+ ", zoned=yes"
+#else
+ ", zoned=no"
+#endif
;
pr_info("Btrfs loaded, crc32c=%s%s\n", crc32c_impl(), options);
}
@@ -2523,8 +2578,6 @@ static int __init init_btrfs_fs(void)
if (err)
goto free_end_io_wq;
- btrfs_init_lockdep();
-
btrfs_print_mod_info();
err = btrfs_run_sanity_tests();
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 279d9262b676..4522a1c4cd08 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -263,6 +263,10 @@ BTRFS_FEAT_ATTR_INCOMPAT(no_holes, NO_HOLES);
BTRFS_FEAT_ATTR_INCOMPAT(metadata_uuid, METADATA_UUID);
BTRFS_FEAT_ATTR_COMPAT_RO(free_space_tree, FREE_SPACE_TREE);
BTRFS_FEAT_ATTR_INCOMPAT(raid1c34, RAID1C34);
+/* Remove once support for zoned allocation is feature complete */
+#ifdef CONFIG_BTRFS_DEBUG
+BTRFS_FEAT_ATTR_INCOMPAT(zoned, ZONED);
+#endif
static struct attribute *btrfs_supported_feature_attrs[] = {
BTRFS_FEAT_ATTR_PTR(mixed_backref),
@@ -278,6 +282,9 @@ static struct attribute *btrfs_supported_feature_attrs[] = {
BTRFS_FEAT_ATTR_PTR(metadata_uuid),
BTRFS_FEAT_ATTR_PTR(free_space_tree),
BTRFS_FEAT_ATTR_PTR(raid1c34),
+#ifdef CONFIG_BTRFS_DEBUG
+ BTRFS_FEAT_ATTR_PTR(zoned),
+#endif
NULL
};
@@ -329,10 +336,35 @@ static ssize_t send_stream_version_show(struct kobject *kobj,
}
BTRFS_ATTR(static_feature, send_stream_version, send_stream_version_show);
+static const char *rescue_opts[] = {
+ "usebackuproot",
+ "nologreplay",
+ "ignorebadroots",
+ "ignoredatacsums",
+ "all",
+};
+
+static ssize_t supported_rescue_options_show(struct kobject *kobj,
+ struct kobj_attribute *a,
+ char *buf)
+{
+ ssize_t ret = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rescue_opts); i++)
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
+ (i ? " " : ""), rescue_opts[i]);
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ return ret;
+}
+BTRFS_ATTR(static_feature, supported_rescue_options,
+ supported_rescue_options_show);
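Given the loop above, reading this attribute (exposed with the other static feature attributes under /sys/fs/btrfs/features/) should print the single line "usebackuproot nologreplay ignorebadroots ignoredatacsums all".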
+
static struct attribute *btrfs_supported_static_feature_attrs[] = {
BTRFS_ATTR_PTR(static_feature, rmdir_subvol),
BTRFS_ATTR_PTR(static_feature, supported_checksums),
BTRFS_ATTR_PTR(static_feature, send_stream_version),
+ BTRFS_ATTR_PTR(static_feature, supported_rescue_options),
NULL
};
@@ -433,7 +465,8 @@ static ssize_t btrfs_discard_iops_limit_store(struct kobject *kobj,
return -EINVAL;
WRITE_ONCE(discard_ctl->iops_limit, iops_limit);
-
+ btrfs_discard_calc_delay(discard_ctl);
+ btrfs_discard_schedule_work(discard_ctl, true);
return len;
}
BTRFS_ATTR_RW(discard, iops_limit, btrfs_discard_iops_limit_show,
@@ -463,7 +496,7 @@ static ssize_t btrfs_discard_kbps_limit_store(struct kobject *kobj,
return -EINVAL;
WRITE_ONCE(discard_ctl->kbps_limit, kbps_limit);
-
+ btrfs_discard_schedule_work(discard_ctl, true);
return len;
}
BTRFS_ATTR_RW(discard, kbps_limit, btrfs_discard_kbps_limit_show,
@@ -854,6 +887,82 @@ static ssize_t btrfs_exclusive_operation_show(struct kobject *kobj,
}
BTRFS_ATTR(, exclusive_operation, btrfs_exclusive_operation_show);
+static ssize_t btrfs_generation_show(struct kobject *kobj,
+ struct kobj_attribute *a, char *buf)
+{
+ struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", fs_info->generation);
+}
+BTRFS_ATTR(, generation, btrfs_generation_show);
+
+/*
+ * Look for an exact string @string in @buffer with possible leading or
+ * trailing whitespace
+ */
+static bool strmatch(const char *buffer, const char *string)
+{
+ const size_t len = strlen(string);
+
+ /* Skip leading whitespace */
+ buffer = skip_spaces(buffer);
+
+ /* Match entire string, check if the rest is whitespace or empty */
+ if (strncmp(string, buffer, len) == 0 &&
+ strlen(skip_spaces(buffer + len)) == 0)
+ return true;
+
+ return false;
+}
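A few hypothetical calls to pin down the semantics (sysfs writes usually arrive with a trailing newline, which skip_spaces() handles since isspace('\n') is true):

	strmatch("pid", "pid");		/* true */
	strmatch("  pid\n", "pid");	/* true, surrounding whitespace ignored */
	strmatch("pidgin", "pid");	/* false, trailing non-whitespace */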
+
+static const char * const btrfs_read_policy_name[] = { "pid" };
+
+static ssize_t btrfs_read_policy_show(struct kobject *kobj,
+ struct kobj_attribute *a, char *buf)
+{
+ struct btrfs_fs_devices *fs_devices = to_fs_devs(kobj);
+ ssize_t ret = 0;
+ int i;
+
+ for (i = 0; i < BTRFS_NR_READ_POLICY; i++) {
+ if (fs_devices->read_policy == i)
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s[%s]",
+ (ret == 0 ? "" : " "),
+ btrfs_read_policy_name[i]);
+ else
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
+ (ret == 0 ? "" : " "),
+ btrfs_read_policy_name[i]);
+ }
+
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+
+ return ret;
+}
+
+static ssize_t btrfs_read_policy_store(struct kobject *kobj,
+ struct kobj_attribute *a,
+ const char *buf, size_t len)
+{
+ struct btrfs_fs_devices *fs_devices = to_fs_devs(kobj);
+ int i;
+
+ for (i = 0; i < BTRFS_NR_READ_POLICY; i++) {
+ if (strmatch(buf, btrfs_read_policy_name[i])) {
+ if (i != fs_devices->read_policy) {
+ fs_devices->read_policy = i;
+ btrfs_info(fs_devices->fs_info,
+ "read policy set to '%s'",
+ btrfs_read_policy_name[i]);
+ }
+ return len;
+ }
+ }
+
+ return -EINVAL;
+}
+BTRFS_ATTR_RW(, read_policy, btrfs_read_policy_show, btrfs_read_policy_store);
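Usage note: reading read_policy prints every known policy with the active one in brackets (currently just "[pid]"), and writing one of the listed names selects it; thanks to strmatch() above, surrounding whitespace or a trailing newline in the written value is accepted, while any other string yields -EINVAL.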
+
static const struct attribute *btrfs_attrs[] = {
BTRFS_ATTR_PTR(, label),
BTRFS_ATTR_PTR(, nodesize),
@@ -863,6 +972,8 @@ static const struct attribute *btrfs_attrs[] = {
BTRFS_ATTR_PTR(, metadata_uuid),
BTRFS_ATTR_PTR(, checksum),
BTRFS_ATTR_PTR(, exclusive_operation),
+ BTRFS_ATTR_PTR(, generation),
+ BTRFS_ATTR_PTR(, read_policy),
NULL,
};
@@ -1207,7 +1318,7 @@ static const char *alloc_name(u64 flags)
default:
WARN_ON(1);
return "invalid-combination";
- };
+ }
}
/*
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index 999c14e5d0bd..8ca334d554af 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -134,6 +134,7 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
fs_info->nodesize = nodesize;
fs_info->sectorsize = sectorsize;
+ fs_info->sectorsize_bits = ilog2(sectorsize);
set_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
test_mnt->mnt_sb->s_fs_info = fs_info;
@@ -224,7 +225,7 @@ btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
INIT_LIST_HEAD(&cache->bg_list);
- btrfs_init_free_space_ctl(cache);
+ btrfs_init_free_space_ctl(cache, cache->free_space_ctl);
mutex_init(&cache->free_space_lock);
return cache;
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index df7ce874a74b..73e96d505f4f 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -379,54 +379,50 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
{
struct btrfs_fs_info *fs_info;
- unsigned long len;
unsigned long *bitmap = NULL;
struct extent_buffer *eb = NULL;
int ret;
test_msg("running extent buffer bitmap tests");
- /*
- * In ppc64, sectorsize can be 64K, thus 4 * 64K will be larger than
- * BTRFS_MAX_METADATA_BLOCKSIZE.
- */
- len = (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE)
- ? sectorsize * 4 : sectorsize;
-
- fs_info = btrfs_alloc_dummy_fs_info(len, len);
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
if (!fs_info) {
test_std_err(TEST_ALLOC_FS_INFO);
return -ENOMEM;
}
- bitmap = kmalloc(len, GFP_KERNEL);
+ bitmap = kmalloc(nodesize, GFP_KERNEL);
if (!bitmap) {
test_err("couldn't allocate test bitmap");
ret = -ENOMEM;
goto out;
}
- eb = __alloc_dummy_extent_buffer(fs_info, 0, len);
+ eb = __alloc_dummy_extent_buffer(fs_info, 0, nodesize);
if (!eb) {
test_std_err(TEST_ALLOC_ROOT);
ret = -ENOMEM;
goto out;
}
- ret = __test_eb_bitmaps(bitmap, eb, len);
+ ret = __test_eb_bitmaps(bitmap, eb, nodesize);
if (ret)
goto out;
- /* Do it over again with an extent buffer which isn't page-aligned. */
free_extent_buffer(eb);
- eb = __alloc_dummy_extent_buffer(fs_info, nodesize / 2, len);
+
+ /*
+ * Test again for the case where the tree block is sectorsize aligned
+ * but not nodesize aligned.
+ */
+ eb = __alloc_dummy_extent_buffer(fs_info, sectorsize, nodesize);
if (!eb) {
test_std_err(TEST_ALLOC_ROOT);
ret = -ENOMEM;
goto out;
}
- ret = __test_eb_bitmaps(bitmap, eb, len);
+ ret = __test_eb_bitmaps(bitmap, eb, nodesize);
out:
free_extent_buffer(eb);
kfree(bitmap);
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index aebdf23f0cdd..8f05c1eb833f 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -399,7 +399,6 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group *cache,
u64 offset;
u64 max_extent_size;
const struct btrfs_free_space_op test_free_space_ops = {
- .recalc_thresholds = cache->free_space_ctl->op->recalc_thresholds,
.use_bitmap = test_use_bitmap,
};
const struct btrfs_free_space_op *orig_free_space_ops;
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index ce1ca8e73c2d..f3137285a9e2 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -36,7 +36,6 @@ static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
return -ENOMEM;
}
- path->leave_spinning = 1;
ret = btrfs_insert_empty_item(&trans, root, path, &ins, size);
if (ret) {
test_err("couldn't insert ref %d", ret);
@@ -86,7 +85,6 @@ static int add_tree_ref(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
return -ENOMEM;
}
- path->leave_spinning = 1;
ret = btrfs_search_slot(&trans, root, &key, path, 0, 1);
if (ret) {
test_err("couldn't find extent ref");
@@ -135,7 +133,6 @@ static int remove_extent_item(struct btrfs_root *root, u64 bytenr,
test_std_err(TEST_ALLOC_ROOT);
return -ENOMEM;
}
- path->leave_spinning = 1;
ret = btrfs_search_slot(&trans, root, &key, path, -1, 1);
if (ret) {
@@ -170,7 +167,6 @@ static int remove_extent_ref(struct btrfs_root *root, u64 bytenr,
return -ENOMEM;
}
- path->leave_spinning = 1;
ret = btrfs_search_slot(&trans, root, &key, path, 0, 1);
if (ret) {
test_err("couldn't find extent ref");
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 52ada47aff50..8e0f7a1029c6 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -16,7 +16,6 @@
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
-#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"
@@ -155,6 +154,7 @@ static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root, *tmp;
+ struct btrfs_caching_control *caching_ctl, *next;
down_write(&fs_info->commit_root_sem);
list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits,
@@ -162,8 +162,6 @@ static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
list_del_init(&root->dirty_list);
free_extent_buffer(root->commit_root);
root->commit_root = btrfs_root_node(root);
- if (is_fstree(root->root_key.objectid))
- btrfs_unpin_free_ino(root);
extent_io_tree_release(&root->dirty_log_pages);
btrfs_qgroup_clean_swapped_blocks(root);
}
@@ -180,6 +178,47 @@ static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
spin_lock(&cur_trans->dropped_roots_lock);
}
spin_unlock(&cur_trans->dropped_roots_lock);
+
+ /*
+ * We have to update the last_byte_to_unpin under the commit_root_sem,
+ * at the same time as we swap out the commit roots.
+ *
+ * This is because we need an accurate view of how far the caching
+ * kthreads had progressed at the moment of the swap. Consider the
+ * following views of the extent tree for a block group
+ *
+ * commit root
+ * +----+----+----+----+----+----+----+
+ * |\\\\| |\\\\|\\\\| |\\\\|\\\\|
+ * +----+----+----+----+----+----+----+
+ * 0 1 2 3 4 5 6 7
+ *
+ * new commit root
+ * +----+----+----+----+----+----+----+
+ * | | | |\\\\| | |\\\\|
+ * +----+----+----+----+----+----+----+
+ * 0 1 2 3 4 5 6 7
+ *
+ * If the cache_ctl->progress was at 3, then we are only allowed to
+ * unpin [0,1) and [2,3), because the caching thread has already
+ * processed those extents. We are not allowed to unpin [5,6), because
+ * the caching thread will restart its search from 3, and thus find
+ * the hole from [4,6) to add to the free space cache.
+ */
+ spin_lock(&fs_info->block_group_cache_lock);
+ list_for_each_entry_safe(caching_ctl, next,
+ &fs_info->caching_block_groups, list) {
+ struct btrfs_block_group *cache = caching_ctl->block_group;
+
+ if (btrfs_block_group_done(cache)) {
+ cache->last_byte_to_unpin = (u64)-1;
+ list_del_init(&caching_ctl->list);
+ btrfs_put_caching_control(caching_ctl);
+ } else {
+ cache->last_byte_to_unpin = caching_ctl->progress;
+ }
+ }
+ spin_unlock(&fs_info->block_group_cache_lock);
up_write(&fs_info->commit_root_sem);
}
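
The rule the loop above encodes is easy to state: an extent found in the old commit root may be unpinned only if the caching scan has already moved past its start, and once caching finishes, last_byte_to_unpin becomes (u64)-1 so everything qualifies. Below is a minimal userspace sketch of that invariant, with hypothetical names (may_unpin, CACHING_DONE) standing in for the kernel logic; the asserts replay the worked example from the comment.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define CACHING_DONE UINT64_MAX	/* mirrors last_byte_to_unpin = (u64)-1 */

/*
 * An extent starting at 'start' may be unpinned only when the caching
 * scan has already moved past it; otherwise the restarted scan would
 * treat the freed range as a hole and feed it to the free space cache.
 */
static bool may_unpin(uint64_t start, uint64_t last_byte_to_unpin)
{
	return start < last_byte_to_unpin;
}

int main(void)
{
	uint64_t progress = 3;	/* cache_ctl->progress from the comment */

	assert(may_unpin(0, progress));		/* [0,1) already scanned */
	assert(may_unpin(2, progress));		/* [2,3) already scanned */
	assert(!may_unpin(5, progress));	/* [5,6) not scanned yet */
	assert(may_unpin(5, CACHING_DONE));	/* caching finished */
	return 0;
}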
@@ -856,24 +895,24 @@ void btrfs_throttle(struct btrfs_fs_info *fs_info)
wait_current_trans(fs_info);
}
-static int should_end_transaction(struct btrfs_trans_handle *trans)
+static bool should_end_transaction(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
if (btrfs_check_space_for_delayed_refs(fs_info))
- return 1;
+ return true;
return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
}
-int btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
+bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
struct btrfs_transaction *cur_trans = trans->transaction;
smp_mb();
if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
cur_trans->delayed_refs.flushing)
- return 1;
+ return true;
return should_end_transaction(trans);
}
@@ -1300,8 +1339,6 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
btrfs_free_log(trans, root);
btrfs_update_reloc_root(trans, root);
- btrfs_save_ino_cache(root, trans);
-
/* see comments in should_cow_block() */
clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
smp_mb__after_atomic();
@@ -1598,8 +1635,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
goto fail;
}
- btrfs_set_lock_blocking_write(old);
-
ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
/* clean up in any case */
btrfs_tree_unlock(old);
@@ -1681,7 +1716,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
dentry->d_name.len * 2);
parent_inode->i_mtime = parent_inode->i_ctime =
current_time(parent_inode);
- ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
+ ret = btrfs_update_inode_fallback(trans, parent_root, BTRFS_I(parent_inode));
if (ret) {
btrfs_abort_transaction(trans, ret);
goto fail;
@@ -1761,6 +1796,8 @@ static void update_super_roots(struct btrfs_fs_info *fs_info)
super->root_level = root_item->level;
if (btrfs_test_opt(fs_info, SPACE_CACHE))
super->cache_generation = root_item->generation;
+ else if (test_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags))
+ super->cache_generation = 0;
if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
super->uuid_tree_generation = root_item->generation;
}
@@ -1956,10 +1993,8 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
}
}
-static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans)
+static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = trans->fs_info;
-
/*
* We use writeback_inodes_sb here because if we used
* btrfs_start_delalloc_roots we would deadlock with fs freeze.
@@ -1969,50 +2004,15 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans)
* from already being in a transaction and our join_transaction doesn't
* have to re-take the fs freeze lock.
*/
- if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
+ if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
- } else {
- struct btrfs_pending_snapshot *pending;
- struct list_head *head = &trans->transaction->pending_snapshots;
-
- /*
- * Flush dellaloc for any root that is going to be snapshotted.
- * This is done to avoid a corrupted version of files, in the
- * snapshots, that had both buffered and direct IO writes (even
- * if they were done sequentially) due to an unordered update of
- * the inode's size on disk.
- */
- list_for_each_entry(pending, head, list) {
- int ret;
-
- ret = btrfs_start_delalloc_snapshot(pending->root);
- if (ret)
- return ret;
- }
- }
return 0;
}
-static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans)
+static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = trans->fs_info;
-
- if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
+ if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
- } else {
- struct btrfs_pending_snapshot *pending;
- struct list_head *head = &trans->transaction->pending_snapshots;
-
- /*
- * Wait for any dellaloc that we started previously for the roots
- * that are going to be snapshotted. This is to avoid a corrupted
- * version of files in the snapshots that had both buffered and
- * direct IO writes (even if they were done sequentially).
- */
- list_for_each_entry(pending, head, list)
- btrfs_wait_ordered_extents(pending->root,
- U64_MAX, 0, U64_MAX);
- }
}
int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
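
With the per-snapshot branches deleted, both helpers depend only on fs-wide state, which is why they now take fs_info instead of a transaction handle. A compressed sketch of the remaining start/wait pairing at commit time follows; fs_ctx, start_writeback and wait_ordered are illustrative stand-ins, not kernel APIs.

#include <stdbool.h>

struct fs_ctx {
	bool flushoncommit;	/* stand-in for the FLUSHONCOMMIT mount option */
};

/* Hypothetical stand-ins for writeback_inodes_sb()/btrfs_wait_ordered_roots(). */
static void start_writeback(struct fs_ctx *fs) { (void)fs; }
static void wait_ordered(struct fs_ctx *fs) { (void)fs; }

static int start_delalloc_flush(struct fs_ctx *fs)
{
	if (fs->flushoncommit)
		start_writeback(fs);
	return 0;
}

static void wait_delalloc_flush(struct fs_ctx *fs)
{
	if (fs->flushoncommit)
		wait_ordered(fs);
}

int main(void)
{
	struct fs_ctx fs = { .flushoncommit = true };

	/* Commit-time pairing: kick writeback early, wait for it later. */
	if (start_delalloc_flush(&fs) == 0)
		wait_delalloc_flush(&fs);
	return 0;
}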
@@ -2150,7 +2150,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
extwriter_counter_dec(cur_trans, trans->type);
- ret = btrfs_start_delalloc_flush(trans);
+ ret = btrfs_start_delalloc_flush(fs_info);
if (ret)
goto cleanup_transaction;
@@ -2166,7 +2166,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
if (ret)
goto cleanup_transaction;
- btrfs_wait_delalloc_flush(trans);
+ btrfs_wait_delalloc_flush(fs_info);
/*
* Wait for all ordered extents started by a fast fsync that joined this
@@ -2293,8 +2293,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
goto unlock_tree_log;
}
- btrfs_prepare_extent_commit(fs_info);
-
cur_trans = fs_info->running_transaction;
btrfs_set_root_node(&fs_info->tree_root->root_item,
@@ -2435,10 +2433,6 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);
btrfs_kill_all_delayed_nodes(root);
- if (root->ino_cache_inode) {
- iput(root->ino_cache_inode);
- root->ino_cache_inode = NULL;
- }
if (btrfs_header_backref_rev(root->node) <
BTRFS_MIXED_BACKREF_REV)
@@ -2459,16 +2453,6 @@ void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
if (!prev)
return;
- bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
- if (prev & bit)
- btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
- prev &= ~bit;
-
- bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
- if (prev & bit)
- btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
- prev &= ~bit;
-
bit = 1 << BTRFS_PENDING_COMMIT;
if (prev & bit)
btrfs_debug(fs_info, "pending commit done");
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 858d9153a1cd..31ca81bad822 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -112,7 +112,6 @@ struct btrfs_transaction {
#define TRANS_EXTWRITERS (__TRANS_START | __TRANS_ATTACH)
#define BTRFS_SEND_TRANS_STUB ((void *)1)
-#define BTRFS_DIO_SYNC_STUB ((void *)2)
struct btrfs_trans_handle {
u64 transid;
@@ -219,7 +218,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
int wait_for_unblock);
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
-int btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
+bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
void btrfs_throttle(struct btrfs_fs_info *fs_info);
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index ea2bb4cb5890..028e733e42f3 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -100,7 +100,8 @@ static void file_extent_err(const struct extent_buffer *eb, int slot,
*/
#define CHECK_FE_ALIGNED(leaf, slot, fi, name, alignment) \
({ \
- if (!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))) \
+ if (unlikely(!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), \
+ (alignment)))) \
file_extent_err((leaf), (slot), \
"invalid %s for file extent, have %llu, should be aligned to %u", \
(#name), btrfs_file_extent_##name((leaf), (fi)), \
@@ -203,7 +204,7 @@ static int check_extent_data_item(struct extent_buffer *leaf,
u32 item_size = btrfs_item_size_nr(leaf, slot);
u64 extent_end;
- if (!IS_ALIGNED(key->offset, sectorsize)) {
+ if (unlikely(!IS_ALIGNED(key->offset, sectorsize))) {
file_extent_err(leaf, slot,
"unaligned file_offset for file extent, have %llu should be aligned to %u",
key->offset, sectorsize);
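
The tree-checker hunks that follow all apply the same mechanical change: wrap each corruption check in unlikely() so the compiler lays the error path out of line, keeping the hot path compact. A standalone illustration of what the annotation does, using the same __builtin_expect definition the kernel uses; check_aligned here is a simplified stand-in, not the kernel's checker.

#include <stdint.h>
#include <stdio.h>

/* Same definition the kernel uses in its compiler headers. */
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Simplified stand-in for the sectorsize alignment checks above. */
static int check_aligned(uint64_t offset, uint32_t sectorsize)
{
	if (unlikely(offset % sectorsize != 0)) {
		/* Error path: rarely taken, kept off the hot path. */
		fprintf(stderr, "unaligned offset %llu\n",
			(unsigned long long)offset);
		return -1;	/* the kernel returns -EUCLEAN here */
	}
	return 0;
}

int main(void)
{
	return check_aligned(8192, 4096);	/* aligned: returns 0 */
}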
@@ -216,7 +217,7 @@ static int check_extent_data_item(struct extent_buffer *leaf,
* But if objectids mismatch, it means we have a missing
* INODE_ITEM.
*/
- if (!check_prev_ino(leaf, key, slot, prev_key))
+ if (unlikely(!check_prev_ino(leaf, key, slot, prev_key)))
return -EUCLEAN;
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
@@ -225,14 +226,15 @@ static int check_extent_data_item(struct extent_buffer *leaf,
* Make sure the item contains at least inline header, so the file
* extent type is not some garbage.
*/
- if (item_size < BTRFS_FILE_EXTENT_INLINE_DATA_START) {
+ if (unlikely(item_size < BTRFS_FILE_EXTENT_INLINE_DATA_START)) {
file_extent_err(leaf, slot,
"invalid item size, have %u expect [%zu, %u)",
item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START,
SZ_4K);
return -EUCLEAN;
}
- if (btrfs_file_extent_type(leaf, fi) >= BTRFS_NR_FILE_EXTENT_TYPES) {
+ if (unlikely(btrfs_file_extent_type(leaf, fi) >=
+ BTRFS_NR_FILE_EXTENT_TYPES)) {
file_extent_err(leaf, slot,
"invalid type for file extent, have %u expect range [0, %u]",
btrfs_file_extent_type(leaf, fi),
@@ -244,14 +246,15 @@ static int check_extent_data_item(struct extent_buffer *leaf,
* Support for new compression/encryption must introduce incompat flag,
* and must be caught in open_ctree().
*/
- if (btrfs_file_extent_compression(leaf, fi) >= BTRFS_NR_COMPRESS_TYPES) {
+ if (unlikely(btrfs_file_extent_compression(leaf, fi) >=
+ BTRFS_NR_COMPRESS_TYPES)) {
file_extent_err(leaf, slot,
"invalid compression for file extent, have %u expect range [0, %u]",
btrfs_file_extent_compression(leaf, fi),
BTRFS_NR_COMPRESS_TYPES - 1);
return -EUCLEAN;
}
- if (btrfs_file_extent_encryption(leaf, fi)) {
+ if (unlikely(btrfs_file_extent_encryption(leaf, fi))) {
file_extent_err(leaf, slot,
"invalid encryption for file extent, have %u expect 0",
btrfs_file_extent_encryption(leaf, fi));
@@ -259,7 +262,7 @@ static int check_extent_data_item(struct extent_buffer *leaf,
}
if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
/* Inline extent must have 0 as key offset */
- if (key->offset) {
+ if (unlikely(key->offset)) {
file_extent_err(leaf, slot,
"invalid file_offset for inline file extent, have %llu expect 0",
key->offset);
@@ -272,8 +275,8 @@ static int check_extent_data_item(struct extent_buffer *leaf,
return 0;
/* Uncompressed inline extent size must match item size */
- if (item_size != BTRFS_FILE_EXTENT_INLINE_DATA_START +
- btrfs_file_extent_ram_bytes(leaf, fi)) {
+ if (unlikely(item_size != BTRFS_FILE_EXTENT_INLINE_DATA_START +
+ btrfs_file_extent_ram_bytes(leaf, fi))) {
file_extent_err(leaf, slot,
"invalid ram_bytes for uncompressed inline extent, have %u expect %llu",
item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START +
@@ -284,22 +287,22 @@ static int check_extent_data_item(struct extent_buffer *leaf,
}
/* Regular or preallocated extent has fixed item size */
- if (item_size != sizeof(*fi)) {
+ if (unlikely(item_size != sizeof(*fi))) {
file_extent_err(leaf, slot,
"invalid item size for reg/prealloc file extent, have %u expect %zu",
item_size, sizeof(*fi));
return -EUCLEAN;
}
- if (CHECK_FE_ALIGNED(leaf, slot, fi, ram_bytes, sectorsize) ||
- CHECK_FE_ALIGNED(leaf, slot, fi, disk_bytenr, sectorsize) ||
- CHECK_FE_ALIGNED(leaf, slot, fi, disk_num_bytes, sectorsize) ||
- CHECK_FE_ALIGNED(leaf, slot, fi, offset, sectorsize) ||
- CHECK_FE_ALIGNED(leaf, slot, fi, num_bytes, sectorsize))
+ if (unlikely(CHECK_FE_ALIGNED(leaf, slot, fi, ram_bytes, sectorsize) ||
+ CHECK_FE_ALIGNED(leaf, slot, fi, disk_bytenr, sectorsize) ||
+ CHECK_FE_ALIGNED(leaf, slot, fi, disk_num_bytes, sectorsize) ||
+ CHECK_FE_ALIGNED(leaf, slot, fi, offset, sectorsize) ||
+ CHECK_FE_ALIGNED(leaf, slot, fi, num_bytes, sectorsize)))
return -EUCLEAN;
/* Catch extent end overflow */
- if (check_add_overflow(btrfs_file_extent_num_bytes(leaf, fi),
- key->offset, &extent_end)) {
+ if (unlikely(check_add_overflow(btrfs_file_extent_num_bytes(leaf, fi),
+ key->offset, &extent_end))) {
file_extent_err(leaf, slot,
"extent end overflow, have file offset %llu extent num bytes %llu",
key->offset,
@@ -320,7 +323,7 @@ static int check_extent_data_item(struct extent_buffer *leaf,
prev_fi = btrfs_item_ptr(leaf, slot - 1,
struct btrfs_file_extent_item);
prev_end = file_extent_end(leaf, prev_key, prev_fi);
- if (prev_end > key->offset) {
+ if (unlikely(prev_end > key->offset)) {
file_extent_err(leaf, slot - 1,
"file extent end range (%llu) goes beyond start offset (%llu) of the next file extent",
prev_end, key->offset);
@@ -336,21 +339,21 @@ static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key,
{
struct btrfs_fs_info *fs_info = leaf->fs_info;
u32 sectorsize = fs_info->sectorsize;
- u32 csumsize = btrfs_super_csum_size(fs_info->super_copy);
+ const u32 csumsize = fs_info->csum_size;
- if (key->objectid != BTRFS_EXTENT_CSUM_OBJECTID) {
+ if (unlikely(key->objectid != BTRFS_EXTENT_CSUM_OBJECTID)) {
generic_err(leaf, slot,
"invalid key objectid for csum item, have %llu expect %llu",
key->objectid, BTRFS_EXTENT_CSUM_OBJECTID);
return -EUCLEAN;
}
- if (!IS_ALIGNED(key->offset, sectorsize)) {
+ if (unlikely(!IS_ALIGNED(key->offset, sectorsize))) {
generic_err(leaf, slot,
"unaligned key offset for csum item, have %llu should be aligned to %u",
key->offset, sectorsize);
return -EUCLEAN;
}
- if (!IS_ALIGNED(btrfs_item_size_nr(leaf, slot), csumsize)) {
+ if (unlikely(!IS_ALIGNED(btrfs_item_size_nr(leaf, slot), csumsize))) {
generic_err(leaf, slot,
"unaligned item size for csum item, have %u should be aligned to %u",
btrfs_item_size_nr(leaf, slot), csumsize);
@@ -363,7 +366,7 @@ static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key,
prev_item_size = btrfs_item_size_nr(leaf, slot - 1);
prev_csum_end = (prev_item_size / csumsize) * sectorsize;
prev_csum_end += prev_key->offset;
- if (prev_csum_end > key->offset) {
+ if (unlikely(prev_csum_end > key->offset)) {
generic_err(leaf, slot - 1,
"csum end range (%llu) goes beyond the start range (%llu) of the next csum item",
prev_csum_end, key->offset);
@@ -388,15 +391,16 @@ static int check_inode_key(struct extent_buffer *leaf, struct btrfs_key *key,
/* For XATTR_ITEM, location key should be all 0 */
if (item_key.type == BTRFS_XATTR_ITEM_KEY) {
- if (key->type != 0 || key->objectid != 0 || key->offset != 0)
+ if (unlikely(key->objectid != 0 || key->type != 0 ||
+ key->offset != 0))
return -EUCLEAN;
return 0;
}
- if ((key->objectid < BTRFS_FIRST_FREE_OBJECTID ||
- key->objectid > BTRFS_LAST_FREE_OBJECTID) &&
- key->objectid != BTRFS_ROOT_TREE_DIR_OBJECTID &&
- key->objectid != BTRFS_FREE_INO_OBJECTID) {
+ if (unlikely((key->objectid < BTRFS_FIRST_FREE_OBJECTID ||
+ key->objectid > BTRFS_LAST_FREE_OBJECTID) &&
+ key->objectid != BTRFS_ROOT_TREE_DIR_OBJECTID &&
+ key->objectid != BTRFS_FREE_INO_OBJECTID)) {
if (is_inode_item) {
generic_err(leaf, slot,
"invalid key objectid: has %llu expect %llu or [%llu, %llu] or %llu",
@@ -414,7 +418,7 @@ static int check_inode_key(struct extent_buffer *leaf, struct btrfs_key *key,
}
return -EUCLEAN;
}
- if (key->offset != 0) {
+ if (unlikely(key->offset != 0)) {
if (is_inode_item)
inode_item_err(leaf, slot,
"invalid key offset: has %llu expect 0",
@@ -438,7 +442,7 @@ static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key,
is_root_item = (item_key.type == BTRFS_ROOT_ITEM_KEY);
/* No such tree id */
- if (key->objectid == 0) {
+ if (unlikely(key->objectid == 0)) {
if (is_root_item)
generic_err(leaf, slot, "invalid root id 0");
else
@@ -448,7 +452,7 @@ static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key,
}
/* DIR_ITEM/INDEX/INODE_REF is not allowed to point to non-fs trees */
- if (!is_fstree(key->objectid) && !is_root_item) {
+ if (unlikely(!is_fstree(key->objectid) && !is_root_item)) {
dir_item_err(leaf, slot,
"invalid location key objectid, have %llu expect [%llu, %llu]",
key->objectid, BTRFS_FIRST_FREE_OBJECTID,
@@ -464,7 +468,8 @@ static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key,
* So here we only check offset for reloc tree whose key->offset must
* be a valid tree.
*/
- if (key->objectid == BTRFS_TREE_RELOC_OBJECTID && key->offset == 0) {
+ if (unlikely(key->objectid == BTRFS_TREE_RELOC_OBJECTID &&
+ key->offset == 0)) {
generic_err(leaf, slot, "invalid root id 0 for reloc tree");
return -EUCLEAN;
}
@@ -480,8 +485,9 @@ static int check_dir_item(struct extent_buffer *leaf,
u32 item_size = btrfs_item_size_nr(leaf, slot);
u32 cur = 0;
- if (!check_prev_ino(leaf, key, slot, prev_key))
+ if (unlikely(!check_prev_ino(leaf, key, slot, prev_key)))
return -EUCLEAN;
+
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
while (cur < item_size) {
struct btrfs_key location_key;
@@ -494,7 +500,7 @@ static int check_dir_item(struct extent_buffer *leaf,
int ret;
/* header itself should not cross item boundary */
- if (cur + sizeof(*di) > item_size) {
+ if (unlikely(cur + sizeof(*di) > item_size)) {
dir_item_err(leaf, slot,
"dir item header crosses item boundary, have %zu boundary %u",
cur + sizeof(*di), item_size);
@@ -505,12 +511,12 @@ static int check_dir_item(struct extent_buffer *leaf,
btrfs_dir_item_key_to_cpu(leaf, di, &location_key);
if (location_key.type == BTRFS_ROOT_ITEM_KEY) {
ret = check_root_key(leaf, &location_key, slot);
- if (ret < 0)
+ if (unlikely(ret < 0))
return ret;
} else if (location_key.type == BTRFS_INODE_ITEM_KEY ||
location_key.type == 0) {
ret = check_inode_key(leaf, &location_key, slot);
- if (ret < 0)
+ if (unlikely(ret < 0))
return ret;
} else {
dir_item_err(leaf, slot,
@@ -522,22 +528,22 @@ static int check_dir_item(struct extent_buffer *leaf,
/* dir type check */
dir_type = btrfs_dir_type(leaf, di);
- if (dir_type >= BTRFS_FT_MAX) {
+ if (unlikely(dir_type >= BTRFS_FT_MAX)) {
dir_item_err(leaf, slot,
"invalid dir item type, have %u expect [0, %u)",
dir_type, BTRFS_FT_MAX);
return -EUCLEAN;
}
- if (key->type == BTRFS_XATTR_ITEM_KEY &&
- dir_type != BTRFS_FT_XATTR) {
+ if (unlikely(key->type == BTRFS_XATTR_ITEM_KEY &&
+ dir_type != BTRFS_FT_XATTR)) {
dir_item_err(leaf, slot,
"invalid dir item type for XATTR key, have %u expect %u",
dir_type, BTRFS_FT_XATTR);
return -EUCLEAN;
}
- if (dir_type == BTRFS_FT_XATTR &&
- key->type != BTRFS_XATTR_ITEM_KEY) {
+ if (unlikely(dir_type == BTRFS_FT_XATTR &&
+ key->type != BTRFS_XATTR_ITEM_KEY)) {
dir_item_err(leaf, slot,
"xattr dir type found for non-XATTR key");
return -EUCLEAN;
@@ -550,13 +556,13 @@ static int check_dir_item(struct extent_buffer *leaf,
/* Name/data length check */
name_len = btrfs_dir_name_len(leaf, di);
data_len = btrfs_dir_data_len(leaf, di);
- if (name_len > max_name_len) {
+ if (unlikely(name_len > max_name_len)) {
dir_item_err(leaf, slot,
"dir item name len too long, have %u max %u",
name_len, max_name_len);
return -EUCLEAN;
}
- if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(fs_info)) {
+ if (unlikely(name_len + data_len > BTRFS_MAX_XATTR_SIZE(fs_info))) {
dir_item_err(leaf, slot,
"dir item name and data len too long, have %u max %u",
name_len + data_len,
@@ -564,7 +570,7 @@ static int check_dir_item(struct extent_buffer *leaf,
return -EUCLEAN;
}
- if (data_len && dir_type != BTRFS_FT_XATTR) {
+ if (unlikely(data_len && dir_type != BTRFS_FT_XATTR)) {
dir_item_err(leaf, slot,
"dir item with invalid data len, have %u expect 0",
data_len);
@@ -574,7 +580,7 @@ static int check_dir_item(struct extent_buffer *leaf,
total_size = sizeof(*di) + name_len + data_len;
/* header and name/data should not cross item boundary */
- if (cur + total_size > item_size) {
+ if (unlikely(cur + total_size > item_size)) {
dir_item_err(leaf, slot,
"dir item data crosses item boundary, have %u boundary %u",
cur + total_size, item_size);
@@ -592,7 +598,7 @@ static int check_dir_item(struct extent_buffer *leaf,
read_extent_buffer(leaf, namebuf,
(unsigned long)(di + 1), name_len);
name_hash = btrfs_name_hash(namebuf, name_len);
- if (key->offset != name_hash) {
+ if (unlikely(key->offset != name_hash)) {
dir_item_err(leaf, slot,
"name hash mismatch with key, have 0x%016x expect 0x%016llx",
name_hash, key->offset);
@@ -641,13 +647,13 @@ static int check_block_group_item(struct extent_buffer *leaf,
* Here we don't really care about alignment since extent allocator can
* handle it. We care more about the size.
*/
- if (key->offset == 0) {
+ if (unlikely(key->offset == 0)) {
block_group_err(leaf, slot,
"invalid block group size 0");
return -EUCLEAN;
}
- if (item_size != sizeof(bgi)) {
+ if (unlikely(item_size != sizeof(bgi))) {
block_group_err(leaf, slot,
"invalid item size, have %u expect %zu",
item_size, sizeof(bgi));
@@ -656,8 +662,8 @@ static int check_block_group_item(struct extent_buffer *leaf,
read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
sizeof(bgi));
- if (btrfs_stack_block_group_chunk_objectid(&bgi) !=
- BTRFS_FIRST_CHUNK_TREE_OBJECTID) {
+ if (unlikely(btrfs_stack_block_group_chunk_objectid(&bgi) !=
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID)) {
block_group_err(leaf, slot,
"invalid block group chunk objectid, have %llu expect %llu",
btrfs_stack_block_group_chunk_objectid(&bgi),
@@ -665,7 +671,7 @@ static int check_block_group_item(struct extent_buffer *leaf,
return -EUCLEAN;
}
- if (btrfs_stack_block_group_used(&bgi) > key->offset) {
+ if (unlikely(btrfs_stack_block_group_used(&bgi) > key->offset)) {
block_group_err(leaf, slot,
"invalid block group used, have %llu expect [0, %llu)",
btrfs_stack_block_group_used(&bgi), key->offset);
@@ -673,7 +679,7 @@ static int check_block_group_item(struct extent_buffer *leaf,
}
flags = btrfs_stack_block_group_flags(&bgi);
- if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) {
+ if (unlikely(hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1)) {
block_group_err(leaf, slot,
"invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
flags & BTRFS_BLOCK_GROUP_PROFILE_MASK,
@@ -682,11 +688,11 @@ static int check_block_group_item(struct extent_buffer *leaf,
}
type = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
- if (type != BTRFS_BLOCK_GROUP_DATA &&
- type != BTRFS_BLOCK_GROUP_METADATA &&
- type != BTRFS_BLOCK_GROUP_SYSTEM &&
- type != (BTRFS_BLOCK_GROUP_METADATA |
- BTRFS_BLOCK_GROUP_DATA)) {
+ if (unlikely(type != BTRFS_BLOCK_GROUP_DATA &&
+ type != BTRFS_BLOCK_GROUP_METADATA &&
+ type != BTRFS_BLOCK_GROUP_SYSTEM &&
+ type != (BTRFS_BLOCK_GROUP_METADATA |
+ BTRFS_BLOCK_GROUP_DATA))) {
block_group_err(leaf, slot,
"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
type, hweight64(type),
@@ -773,49 +779,49 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
ncopies = btrfs_raid_array[raid_index].ncopies;
nparity = btrfs_raid_array[raid_index].nparity;
- if (!num_stripes) {
+ if (unlikely(!num_stripes)) {
chunk_err(leaf, chunk, logical,
"invalid chunk num_stripes, have %u", num_stripes);
return -EUCLEAN;
}
- if (num_stripes < ncopies) {
+ if (unlikely(num_stripes < ncopies)) {
chunk_err(leaf, chunk, logical,
"invalid chunk num_stripes < ncopies, have %u < %d",
num_stripes, ncopies);
return -EUCLEAN;
}
- if (nparity && num_stripes == nparity) {
+ if (unlikely(nparity && num_stripes == nparity)) {
chunk_err(leaf, chunk, logical,
"invalid chunk num_stripes == nparity, have %u == %d",
num_stripes, nparity);
return -EUCLEAN;
}
- if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
+ if (unlikely(!IS_ALIGNED(logical, fs_info->sectorsize))) {
chunk_err(leaf, chunk, logical,
"invalid chunk logical, have %llu should aligned to %u",
logical, fs_info->sectorsize);
return -EUCLEAN;
}
- if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
+ if (unlikely(btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize)) {
chunk_err(leaf, chunk, logical,
"invalid chunk sectorsize, have %u expect %u",
btrfs_chunk_sector_size(leaf, chunk),
fs_info->sectorsize);
return -EUCLEAN;
}
- if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
+ if (unlikely(!length || !IS_ALIGNED(length, fs_info->sectorsize))) {
chunk_err(leaf, chunk, logical,
"invalid chunk length, have %llu", length);
return -EUCLEAN;
}
- if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
+ if (unlikely(!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN)) {
chunk_err(leaf, chunk, logical,
"invalid chunk stripe length: %llu",
stripe_len);
return -EUCLEAN;
}
- if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
- type) {
+ if (unlikely(type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
+ BTRFS_BLOCK_GROUP_PROFILE_MASK))) {
chunk_err(leaf, chunk, logical,
"unrecognized chunk type: 0x%llx",
~(BTRFS_BLOCK_GROUP_TYPE_MASK |
@@ -824,22 +830,23 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
return -EUCLEAN;
}
- if (!has_single_bit_set(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
- (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) {
+ if (unlikely(!has_single_bit_set(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
+ (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0)) {
chunk_err(leaf, chunk, logical,
"invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set",
type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
return -EUCLEAN;
}
- if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
+ if (unlikely((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0)) {
chunk_err(leaf, chunk, logical,
"missing chunk type flag, have 0x%llx one bit must be set in 0x%llx",
type, BTRFS_BLOCK_GROUP_TYPE_MASK);
return -EUCLEAN;
}
- if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
- (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
+ if (unlikely((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
+ (type & (BTRFS_BLOCK_GROUP_METADATA |
+ BTRFS_BLOCK_GROUP_DATA)))) {
chunk_err(leaf, chunk, logical,
"system chunk with data or metadata type: 0x%llx",
type);
@@ -851,20 +858,21 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
mixed = true;
if (!mixed) {
- if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
- (type & BTRFS_BLOCK_GROUP_DATA)) {
+ if (unlikely((type & BTRFS_BLOCK_GROUP_METADATA) &&
+ (type & BTRFS_BLOCK_GROUP_DATA))) {
chunk_err(leaf, chunk, logical,
"mixed chunk type in non-mixed mode: 0x%llx", type);
return -EUCLEAN;
}
}
- if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
- (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
- (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
- (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
- (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
- ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 && num_stripes != 1)) {
+ if (unlikely((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
+ (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
+ (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
+ (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
+ (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
+ ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
+ num_stripes != 1))) {
chunk_err(leaf, chunk, logical,
"invalid num_stripes:sub_stripes %u:%u for profile %llu",
num_stripes, sub_stripes,
@@ -887,7 +895,7 @@ static int check_leaf_chunk_item(struct extent_buffer *leaf,
{
int num_stripes;
- if (btrfs_item_size_nr(leaf, slot) < sizeof(struct btrfs_chunk)) {
+ if (unlikely(btrfs_item_size_nr(leaf, slot) < sizeof(struct btrfs_chunk))) {
chunk_err(leaf, chunk, key->offset,
"invalid chunk item size: have %u expect [%zu, %u)",
btrfs_item_size_nr(leaf, slot),
@@ -901,8 +909,8 @@ static int check_leaf_chunk_item(struct extent_buffer *leaf,
if (num_stripes == 0)
goto out;
- if (btrfs_chunk_item_size(num_stripes) !=
- btrfs_item_size_nr(leaf, slot)) {
+ if (unlikely(btrfs_chunk_item_size(num_stripes) !=
+ btrfs_item_size_nr(leaf, slot))) {
chunk_err(leaf, chunk, key->offset,
"invalid chunk item size: have %u expect %lu",
btrfs_item_size_nr(leaf, slot),
@@ -941,14 +949,14 @@ static int check_dev_item(struct extent_buffer *leaf,
{
struct btrfs_dev_item *ditem;
- if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) {
+ if (unlikely(key->objectid != BTRFS_DEV_ITEMS_OBJECTID)) {
dev_item_err(leaf, slot,
"invalid objectid: has=%llu expect=%llu",
key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
return -EUCLEAN;
}
ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
- if (btrfs_device_id(leaf, ditem) != key->offset) {
+ if (unlikely(btrfs_device_id(leaf, ditem) != key->offset)) {
dev_item_err(leaf, slot,
"devid mismatch: key has=%llu item has=%llu",
key->offset, btrfs_device_id(leaf, ditem));
@@ -960,8 +968,8 @@ static int check_dev_item(struct extent_buffer *leaf,
* it can be 0 for device removal. Device size check can only be done
* by dev extents check.
*/
- if (btrfs_device_bytes_used(leaf, ditem) >
- btrfs_device_total_bytes(leaf, ditem)) {
+ if (unlikely(btrfs_device_bytes_used(leaf, ditem) >
+ btrfs_device_total_bytes(leaf, ditem))) {
dev_item_err(leaf, slot,
"invalid bytes used: have %llu expect [0, %llu]",
btrfs_device_bytes_used(leaf, ditem),
@@ -986,13 +994,13 @@ static int check_inode_item(struct extent_buffer *leaf,
int ret;
ret = check_inode_key(leaf, key, slot);
- if (ret < 0)
+ if (unlikely(ret < 0))
return ret;
iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
/* Here we use super block generation + 1 to handle log tree */
- if (btrfs_inode_generation(leaf, iitem) > super_gen + 1) {
+ if (unlikely(btrfs_inode_generation(leaf, iitem) > super_gen + 1)) {
inode_item_err(leaf, slot,
"invalid inode generation: has %llu expect (0, %llu]",
btrfs_inode_generation(leaf, iitem),
@@ -1000,7 +1008,7 @@ static int check_inode_item(struct extent_buffer *leaf,
return -EUCLEAN;
}
/* Note for ROOT_TREE_DIR_ITEM, mkfs could set its transid 0 */
- if (btrfs_inode_transid(leaf, iitem) > super_gen + 1) {
+ if (unlikely(btrfs_inode_transid(leaf, iitem) > super_gen + 1)) {
inode_item_err(leaf, slot,
"invalid inode transid: has %llu expect [0, %llu]",
btrfs_inode_transid(leaf, iitem), super_gen + 1);
@@ -1013,7 +1021,7 @@ static int check_inode_item(struct extent_buffer *leaf,
* anything in the fs. So here we skip the check.
*/
mode = btrfs_inode_mode(leaf, iitem);
- if (mode & ~valid_mask) {
+ if (unlikely(mode & ~valid_mask)) {
inode_item_err(leaf, slot,
"unknown mode bit detected: 0x%x",
mode & ~valid_mask);
@@ -1026,20 +1034,20 @@ static int check_inode_item(struct extent_buffer *leaf,
* FIFO/CHR/DIR/REG. Only needs to check BLK, LNK and SOCKS
*/
if (!has_single_bit_set(mode & S_IFMT)) {
- if (!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode)) {
+ if (unlikely(!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode))) {
inode_item_err(leaf, slot,
"invalid mode: has 0%o expect valid S_IF* bit(s)",
mode & S_IFMT);
return -EUCLEAN;
}
}
- if (S_ISDIR(mode) && btrfs_inode_nlink(leaf, iitem) > 1) {
+ if (unlikely(S_ISDIR(mode) && btrfs_inode_nlink(leaf, iitem) > 1)) {
inode_item_err(leaf, slot,
"invalid nlink: has %u expect no more than 1 for dir",
btrfs_inode_nlink(leaf, iitem));
return -EUCLEAN;
}
- if (btrfs_inode_flags(leaf, iitem) & ~BTRFS_INODE_FLAG_MASK) {
+ if (unlikely(btrfs_inode_flags(leaf, iitem) & ~BTRFS_INODE_FLAG_MASK)) {
inode_item_err(leaf, slot,
"unknown flags detected: 0x%llx",
btrfs_inode_flags(leaf, iitem) &
@@ -1059,11 +1067,12 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
int ret;
ret = check_root_key(leaf, key, slot);
- if (ret < 0)
+ if (unlikely(ret < 0))
return ret;
- if (btrfs_item_size_nr(leaf, slot) != sizeof(ri) &&
- btrfs_item_size_nr(leaf, slot) != btrfs_legacy_root_item_size()) {
+ if (unlikely(btrfs_item_size_nr(leaf, slot) != sizeof(ri) &&
+ btrfs_item_size_nr(leaf, slot) !=
+ btrfs_legacy_root_item_size())) {
generic_err(leaf, slot,
"invalid root item size, have %u expect %zu or %u",
btrfs_item_size_nr(leaf, slot), sizeof(ri),
@@ -1080,24 +1089,24 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
btrfs_item_size_nr(leaf, slot));
/* Generation related */
- if (btrfs_root_generation(&ri) >
- btrfs_super_generation(fs_info->super_copy) + 1) {
+ if (unlikely(btrfs_root_generation(&ri) >
+ btrfs_super_generation(fs_info->super_copy) + 1)) {
generic_err(leaf, slot,
"invalid root generation, have %llu expect (0, %llu]",
btrfs_root_generation(&ri),
btrfs_super_generation(fs_info->super_copy) + 1);
return -EUCLEAN;
}
- if (btrfs_root_generation_v2(&ri) >
- btrfs_super_generation(fs_info->super_copy) + 1) {
+ if (unlikely(btrfs_root_generation_v2(&ri) >
+ btrfs_super_generation(fs_info->super_copy) + 1)) {
generic_err(leaf, slot,
"invalid root v2 generation, have %llu expect (0, %llu]",
btrfs_root_generation_v2(&ri),
btrfs_super_generation(fs_info->super_copy) + 1);
return -EUCLEAN;
}
- if (btrfs_root_last_snapshot(&ri) >
- btrfs_super_generation(fs_info->super_copy) + 1) {
+ if (unlikely(btrfs_root_last_snapshot(&ri) >
+ btrfs_super_generation(fs_info->super_copy) + 1)) {
generic_err(leaf, slot,
"invalid root last_snapshot, have %llu expect (0, %llu]",
btrfs_root_last_snapshot(&ri),
@@ -1106,27 +1115,27 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
}
/* Alignment and level check */
- if (!IS_ALIGNED(btrfs_root_bytenr(&ri), fs_info->sectorsize)) {
+ if (unlikely(!IS_ALIGNED(btrfs_root_bytenr(&ri), fs_info->sectorsize))) {
generic_err(leaf, slot,
"invalid root bytenr, have %llu expect to be aligned to %u",
btrfs_root_bytenr(&ri), fs_info->sectorsize);
return -EUCLEAN;
}
- if (btrfs_root_level(&ri) >= BTRFS_MAX_LEVEL) {
+ if (unlikely(btrfs_root_level(&ri) >= BTRFS_MAX_LEVEL)) {
generic_err(leaf, slot,
"invalid root level, have %u expect [0, %u]",
btrfs_root_level(&ri), BTRFS_MAX_LEVEL - 1);
return -EUCLEAN;
}
- if (ri.drop_level >= BTRFS_MAX_LEVEL) {
+ if (unlikely(btrfs_root_drop_level(&ri) >= BTRFS_MAX_LEVEL)) {
generic_err(leaf, slot,
"invalid root level, have %u expect [0, %u]",
- ri.drop_level, BTRFS_MAX_LEVEL - 1);
+ btrfs_root_drop_level(&ri), BTRFS_MAX_LEVEL - 1);
return -EUCLEAN;
}
/* Flags check */
- if (btrfs_root_flags(&ri) & ~valid_root_flags) {
+ if (unlikely(btrfs_root_flags(&ri) & ~valid_root_flags)) {
generic_err(leaf, slot,
"invalid root flags, have 0x%llx expect mask 0x%llx",
btrfs_root_flags(&ri), valid_root_flags);
@@ -1180,14 +1189,14 @@ static int check_extent_item(struct extent_buffer *leaf,
u64 total_refs; /* Total refs in btrfs_extent_item */
u64 inline_refs = 0; /* found total inline refs */
- if (key->type == BTRFS_METADATA_ITEM_KEY &&
- !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
+ if (unlikely(key->type == BTRFS_METADATA_ITEM_KEY &&
+ !btrfs_fs_incompat(fs_info, SKINNY_METADATA))) {
generic_err(leaf, slot,
"invalid key type, METADATA_ITEM type invalid when SKINNY_METADATA feature disabled");
return -EUCLEAN;
}
/* key->objectid is the bytenr for both key types */
- if (!IS_ALIGNED(key->objectid, fs_info->sectorsize)) {
+ if (unlikely(!IS_ALIGNED(key->objectid, fs_info->sectorsize))) {
generic_err(leaf, slot,
"invalid key objectid, have %llu expect to be aligned to %u",
key->objectid, fs_info->sectorsize);
@@ -1195,8 +1204,8 @@ static int check_extent_item(struct extent_buffer *leaf,
}
/* key->offset is tree level for METADATA_ITEM_KEY */
- if (key->type == BTRFS_METADATA_ITEM_KEY &&
- key->offset >= BTRFS_MAX_LEVEL) {
+ if (unlikely(key->type == BTRFS_METADATA_ITEM_KEY &&
+ key->offset >= BTRFS_MAX_LEVEL)) {
extent_err(leaf, slot,
"invalid tree level, have %llu expect [0, %u]",
key->offset, BTRFS_MAX_LEVEL - 1);
@@ -1222,7 +1231,7 @@ static int check_extent_item(struct extent_buffer *leaf,
* Either using btrfs_extent_inline_ref::offset, or specific
* data structure.
*/
- if (item_size < sizeof(*ei)) {
+ if (unlikely(item_size < sizeof(*ei))) {
extent_err(leaf, slot,
"invalid item size, have %u expect [%zu, %u)",
item_size, sizeof(*ei),
@@ -1236,15 +1245,16 @@ static int check_extent_item(struct extent_buffer *leaf,
flags = btrfs_extent_flags(leaf, ei);
total_refs = btrfs_extent_refs(leaf, ei);
generation = btrfs_extent_generation(leaf, ei);
- if (generation > btrfs_super_generation(fs_info->super_copy) + 1) {
+ if (unlikely(generation >
+ btrfs_super_generation(fs_info->super_copy) + 1)) {
extent_err(leaf, slot,
"invalid generation, have %llu expect (0, %llu]",
generation,
btrfs_super_generation(fs_info->super_copy) + 1);
return -EUCLEAN;
}
- if (!has_single_bit_set(flags & (BTRFS_EXTENT_FLAG_DATA |
- BTRFS_EXTENT_FLAG_TREE_BLOCK))) {
+ if (unlikely(!has_single_bit_set(flags & (BTRFS_EXTENT_FLAG_DATA |
+ BTRFS_EXTENT_FLAG_TREE_BLOCK)))) {
extent_err(leaf, slot,
"invalid extent flag, have 0x%llx expect 1 bit set in 0x%llx",
flags, BTRFS_EXTENT_FLAG_DATA |
@@ -1253,21 +1263,21 @@ static int check_extent_item(struct extent_buffer *leaf,
}
is_tree_block = !!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK);
if (is_tree_block) {
- if (key->type == BTRFS_EXTENT_ITEM_KEY &&
- key->offset != fs_info->nodesize) {
+ if (unlikely(key->type == BTRFS_EXTENT_ITEM_KEY &&
+ key->offset != fs_info->nodesize)) {
extent_err(leaf, slot,
"invalid extent length, have %llu expect %u",
key->offset, fs_info->nodesize);
return -EUCLEAN;
}
} else {
- if (key->type != BTRFS_EXTENT_ITEM_KEY) {
+ if (unlikely(key->type != BTRFS_EXTENT_ITEM_KEY)) {
extent_err(leaf, slot,
"invalid key type, have %u expect %u for data backref",
key->type, BTRFS_EXTENT_ITEM_KEY);
return -EUCLEAN;
}
- if (!IS_ALIGNED(key->offset, fs_info->sectorsize)) {
+ if (unlikely(!IS_ALIGNED(key->offset, fs_info->sectorsize))) {
extent_err(leaf, slot,
"invalid extent length, have %llu expect aligned to %u",
key->offset, fs_info->sectorsize);
@@ -1281,7 +1291,7 @@ static int check_extent_item(struct extent_buffer *leaf,
struct btrfs_tree_block_info *info;
info = (struct btrfs_tree_block_info *)ptr;
- if (btrfs_tree_block_level(leaf, info) >= BTRFS_MAX_LEVEL) {
+ if (unlikely(btrfs_tree_block_level(leaf, info) >= BTRFS_MAX_LEVEL)) {
extent_err(leaf, slot,
"invalid tree block info level, have %u expect [0, %u]",
btrfs_tree_block_level(leaf, info),
@@ -1300,7 +1310,7 @@ static int check_extent_item(struct extent_buffer *leaf,
u64 inline_offset;
u8 inline_type;
- if (ptr + sizeof(*iref) > end) {
+ if (unlikely(ptr + sizeof(*iref) > end)) {
extent_err(leaf, slot,
"inline ref item overflows extent item, ptr %lu iref size %zu end %lu",
ptr, sizeof(*iref), end);
@@ -1309,7 +1319,7 @@ static int check_extent_item(struct extent_buffer *leaf,
iref = (struct btrfs_extent_inline_ref *)ptr;
inline_type = btrfs_extent_inline_ref_type(leaf, iref);
inline_offset = btrfs_extent_inline_ref_offset(leaf, iref);
- if (ptr + btrfs_extent_inline_ref_size(inline_type) > end) {
+ if (unlikely(ptr + btrfs_extent_inline_ref_size(inline_type) > end)) {
extent_err(leaf, slot,
"inline ref item overflows extent item, ptr %lu iref size %u end %lu",
ptr, inline_type, end);
@@ -1323,7 +1333,8 @@ static int check_extent_item(struct extent_buffer *leaf,
break;
/* Contains parent bytenr */
case BTRFS_SHARED_BLOCK_REF_KEY:
- if (!IS_ALIGNED(inline_offset, fs_info->sectorsize)) {
+ if (unlikely(!IS_ALIGNED(inline_offset,
+ fs_info->sectorsize))) {
extent_err(leaf, slot,
"invalid tree parent bytenr, have %llu expect aligned to %u",
inline_offset, fs_info->sectorsize);
@@ -1338,7 +1349,8 @@ static int check_extent_item(struct extent_buffer *leaf,
case BTRFS_EXTENT_DATA_REF_KEY:
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
dref_offset = btrfs_extent_data_ref_offset(leaf, dref);
- if (!IS_ALIGNED(dref_offset, fs_info->sectorsize)) {
+ if (unlikely(!IS_ALIGNED(dref_offset,
+ fs_info->sectorsize))) {
extent_err(leaf, slot,
"invalid data ref offset, have %llu expect aligned to %u",
dref_offset, fs_info->sectorsize);
@@ -1349,7 +1361,8 @@ static int check_extent_item(struct extent_buffer *leaf,
/* Contains parent bytenr and ref count */
case BTRFS_SHARED_DATA_REF_KEY:
sref = (struct btrfs_shared_data_ref *)(iref + 1);
- if (!IS_ALIGNED(inline_offset, fs_info->sectorsize)) {
+ if (unlikely(!IS_ALIGNED(inline_offset,
+ fs_info->sectorsize))) {
extent_err(leaf, slot,
"invalid data parent bytenr, have %llu expect aligned to %u",
inline_offset, fs_info->sectorsize);
@@ -1365,14 +1378,14 @@ static int check_extent_item(struct extent_buffer *leaf,
ptr += btrfs_extent_inline_ref_size(inline_type);
}
/* No padding is allowed */
- if (ptr != end) {
+ if (unlikely(ptr != end)) {
extent_err(leaf, slot,
"invalid extent item size, padding bytes found");
return -EUCLEAN;
}
/* Finally, check the inline refs against total refs */
- if (inline_refs > total_refs) {
+ if (unlikely(inline_refs > total_refs)) {
extent_err(leaf, slot,
"invalid extent refs, have %llu expect >= inline %llu",
total_refs, inline_refs);
@@ -1389,21 +1402,21 @@ static int check_simple_keyed_refs(struct extent_buffer *leaf,
if (key->type == BTRFS_SHARED_DATA_REF_KEY)
expect_item_size = sizeof(struct btrfs_shared_data_ref);
- if (btrfs_item_size_nr(leaf, slot) != expect_item_size) {
+ if (unlikely(btrfs_item_size_nr(leaf, slot) != expect_item_size)) {
generic_err(leaf, slot,
"invalid item size, have %u expect %u for key type %u",
btrfs_item_size_nr(leaf, slot),
expect_item_size, key->type);
return -EUCLEAN;
}
- if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) {
+ if (unlikely(!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize))) {
generic_err(leaf, slot,
"invalid key objectid for shared block ref, have %llu expect aligned to %u",
key->objectid, leaf->fs_info->sectorsize);
return -EUCLEAN;
}
- if (key->type != BTRFS_TREE_BLOCK_REF_KEY &&
- !IS_ALIGNED(key->offset, leaf->fs_info->sectorsize)) {
+ if (unlikely(key->type != BTRFS_TREE_BLOCK_REF_KEY &&
+ !IS_ALIGNED(key->offset, leaf->fs_info->sectorsize))) {
extent_err(leaf, slot,
"invalid tree parent bytenr, have %llu expect aligned to %u",
key->offset, leaf->fs_info->sectorsize);
@@ -1419,14 +1432,14 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
unsigned long ptr = btrfs_item_ptr_offset(leaf, slot);
const unsigned long end = ptr + btrfs_item_size_nr(leaf, slot);
- if (btrfs_item_size_nr(leaf, slot) % sizeof(*dref) != 0) {
+ if (unlikely(btrfs_item_size_nr(leaf, slot) % sizeof(*dref) != 0)) {
generic_err(leaf, slot,
"invalid item size, have %u expect aligned to %zu for key type %u",
btrfs_item_size_nr(leaf, slot),
sizeof(*dref), key->type);
return -EUCLEAN;
}
- if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) {
+ if (unlikely(!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize))) {
generic_err(leaf, slot,
"invalid key objectid for shared block ref, have %llu expect aligned to %u",
key->objectid, leaf->fs_info->sectorsize);
@@ -1443,13 +1456,13 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
owner = btrfs_extent_data_ref_objectid(leaf, dref);
offset = btrfs_extent_data_ref_offset(leaf, dref);
hash = hash_extent_data_ref(root_objectid, owner, offset);
- if (hash != key->offset) {
+ if (unlikely(hash != key->offset)) {
extent_err(leaf, slot,
"invalid extent data ref hash, item has 0x%016llx key has 0x%016llx",
hash, key->offset);
return -EUCLEAN;
}
- if (!IS_ALIGNED(offset, leaf->fs_info->sectorsize)) {
+ if (unlikely(!IS_ALIGNED(offset, leaf->fs_info->sectorsize))) {
extent_err(leaf, slot,
"invalid extent data backref offset, have %llu expect aligned to %u",
offset, leaf->fs_info->sectorsize);
@@ -1469,10 +1482,10 @@ static int check_inode_ref(struct extent_buffer *leaf,
unsigned long ptr;
unsigned long end;
- if (!check_prev_ino(leaf, key, slot, prev_key))
+ if (unlikely(!check_prev_ino(leaf, key, slot, prev_key)))
return -EUCLEAN;
/* namelen can't be 0, so item_size == sizeof() is also invalid */
- if (btrfs_item_size_nr(leaf, slot) <= sizeof(*iref)) {
+ if (unlikely(btrfs_item_size_nr(leaf, slot) <= sizeof(*iref))) {
inode_ref_err(leaf, slot,
"invalid item size, have %u expect (%zu, %u)",
btrfs_item_size_nr(leaf, slot),
@@ -1485,7 +1498,7 @@ static int check_inode_ref(struct extent_buffer *leaf,
while (ptr < end) {
u16 namelen;
- if (ptr + sizeof(iref) > end) {
+ if (unlikely(ptr + sizeof(iref) > end)) {
inode_ref_err(leaf, slot,
"inode ref overflow, ptr %lu end %lu inode_ref_size %zu",
ptr, end, sizeof(iref));
@@ -1494,7 +1507,7 @@ static int check_inode_ref(struct extent_buffer *leaf,
iref = (struct btrfs_inode_ref *)ptr;
namelen = btrfs_inode_ref_name_len(leaf, iref);
- if (ptr + sizeof(*iref) + namelen > end) {
+ if (unlikely(ptr + sizeof(*iref) + namelen > end)) {
inode_ref_err(leaf, slot,
"inode ref overflow, ptr %lu end %lu namelen %u",
ptr, end, namelen);
@@ -1577,7 +1590,7 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
u32 nritems = btrfs_header_nritems(leaf);
int slot;
- if (btrfs_header_level(leaf) != 0) {
+ if (unlikely(btrfs_header_level(leaf) != 0)) {
generic_err(leaf, 0,
"invalid level for leaf, have %d expect 0",
btrfs_header_level(leaf));
@@ -1596,19 +1609,19 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
u64 owner = btrfs_header_owner(leaf);
/* These trees must never be empty */
- if (owner == BTRFS_ROOT_TREE_OBJECTID ||
- owner == BTRFS_CHUNK_TREE_OBJECTID ||
- owner == BTRFS_EXTENT_TREE_OBJECTID ||
- owner == BTRFS_DEV_TREE_OBJECTID ||
- owner == BTRFS_FS_TREE_OBJECTID ||
- owner == BTRFS_DATA_RELOC_TREE_OBJECTID) {
+ if (unlikely(owner == BTRFS_ROOT_TREE_OBJECTID ||
+ owner == BTRFS_CHUNK_TREE_OBJECTID ||
+ owner == BTRFS_EXTENT_TREE_OBJECTID ||
+ owner == BTRFS_DEV_TREE_OBJECTID ||
+ owner == BTRFS_FS_TREE_OBJECTID ||
+ owner == BTRFS_DATA_RELOC_TREE_OBJECTID)) {
generic_err(leaf, 0,
"invalid root, root %llu must never be empty",
owner);
return -EUCLEAN;
}
/* Unknown tree */
- if (owner == 0) {
+ if (unlikely(owner == 0)) {
generic_err(leaf, 0,
"invalid owner, root 0 is not defined");
return -EUCLEAN;
@@ -1616,7 +1629,7 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
return 0;
}
- if (nritems == 0)
+ if (unlikely(nritems == 0))
return 0;
/*
@@ -1637,7 +1650,7 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
btrfs_item_key_to_cpu(leaf, &key, slot);
/* Make sure the keys are in the right order */
- if (btrfs_comp_cpu_keys(&prev_key, &key) >= 0) {
+ if (unlikely(btrfs_comp_cpu_keys(&prev_key, &key) >= 0)) {
generic_err(leaf, slot,
"bad key order, prev (%llu %u %llu) current (%llu %u %llu)",
prev_key.objectid, prev_key.type,
@@ -1656,7 +1669,7 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
else
item_end_expected = btrfs_item_offset_nr(leaf,
slot - 1);
- if (btrfs_item_end_nr(leaf, slot) != item_end_expected) {
+ if (unlikely(btrfs_item_end_nr(leaf, slot) != item_end_expected)) {
generic_err(leaf, slot,
"unexpected item end, have %u expect %u",
btrfs_item_end_nr(leaf, slot),
@@ -1669,8 +1682,8 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
* just in case all the items are consistent to each other, but
* all point outside of the leaf.
*/
- if (btrfs_item_end_nr(leaf, slot) >
- BTRFS_LEAF_DATA_SIZE(fs_info)) {
+ if (unlikely(btrfs_item_end_nr(leaf, slot) >
+ BTRFS_LEAF_DATA_SIZE(fs_info))) {
generic_err(leaf, slot,
"slot end outside of leaf, have %u expect range [0, %u]",
btrfs_item_end_nr(leaf, slot),
@@ -1679,8 +1692,8 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
}
/* Also check if the item pointer overlaps with btrfs item. */
- if (btrfs_item_nr_offset(slot) + sizeof(struct btrfs_item) >
- btrfs_item_ptr_offset(leaf, slot)) {
+ if (unlikely(btrfs_item_ptr_offset(leaf, slot) <
+ btrfs_item_nr_offset(slot) + sizeof(struct btrfs_item))) {
generic_err(leaf, slot,
"slot overlaps with its data, item end %lu data start %lu",
btrfs_item_nr_offset(slot) +
@@ -1695,7 +1708,7 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
* criteria
*/
ret = check_leaf_item(leaf, &key, slot, &prev_key);
- if (ret < 0)
+ if (unlikely(ret < 0))
return ret;
}
@@ -1728,13 +1741,13 @@ int btrfs_check_node(struct extent_buffer *node)
u64 bytenr;
int ret = 0;
- if (level <= 0 || level >= BTRFS_MAX_LEVEL) {
+ if (unlikely(level <= 0 || level >= BTRFS_MAX_LEVEL)) {
generic_err(node, 0,
"invalid level for node, have %d expect [1, %d]",
level, BTRFS_MAX_LEVEL - 1);
return -EUCLEAN;
}
- if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(fs_info)) {
+ if (unlikely(nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(fs_info))) {
btrfs_crit(fs_info,
"corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%u]",
btrfs_header_owner(node), node->start,
@@ -1748,13 +1761,13 @@ int btrfs_check_node(struct extent_buffer *node)
btrfs_node_key_to_cpu(node, &key, slot);
btrfs_node_key_to_cpu(node, &next_key, slot + 1);
- if (!bytenr) {
+ if (unlikely(!bytenr)) {
generic_err(node, slot,
"invalid NULL node pointer");
ret = -EUCLEAN;
goto out;
}
- if (!IS_ALIGNED(bytenr, fs_info->sectorsize)) {
+ if (unlikely(!IS_ALIGNED(bytenr, fs_info->sectorsize))) {
generic_err(node, slot,
"unaligned pointer, have %llu should be aligned to %u",
bytenr, fs_info->sectorsize);
@@ -1762,7 +1775,7 @@ int btrfs_check_node(struct extent_buffer *node)
goto out;
}
- if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
+ if (unlikely(btrfs_comp_cpu_keys(&key, &next_key) >= 0)) {
generic_err(node, slot,
"bad key order, current (%llu %u %llu) next (%llu %u %llu)",
key.objectid, key.type, key.offset,
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
index d3f28b8f4ff9..7c45d960b53c 100644
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -52,7 +52,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
u32 nritems;
root_node = btrfs_lock_root_node(root);
- btrfs_set_lock_blocking_write(root_node);
nritems = btrfs_header_nritems(root_node);
root->defrag_max.objectid = 0;
/* from above we know this is not a leaf */
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 56cbc1706b6f..254c2ee43aae 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -17,7 +17,6 @@
#include "backref.h"
#include "compression.h"
#include "qgroup.h"
-#include "inode-map.h"
#include "block-group.h"
#include "space-info.h"
@@ -139,8 +138,25 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
struct btrfs_log_ctx *ctx)
{
struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_root *tree_root = fs_info->tree_root;
int ret = 0;
+ /*
+ * First check if the log root tree was already created. If not, create
+ * it before locking the root's log_mutex, just to keep lockdep happy.
+ */
+ if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state)) {
+ mutex_lock(&tree_root->log_mutex);
+ if (!fs_info->log_root_tree) {
+ ret = btrfs_init_log_root_tree(trans, fs_info);
+ if (!ret)
+ set_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state);
+ }
+ mutex_unlock(&tree_root->log_mutex);
+ if (ret)
+ return ret;
+ }
+
mutex_lock(&root->log_mutex);
if (root->log_root) {
@@ -156,13 +172,6 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
}
} else {
- mutex_lock(&fs_info->tree_log_mutex);
- if (!fs_info->log_root_tree)
- ret = btrfs_init_log_root_tree(trans, fs_info);
- mutex_unlock(&fs_info->tree_log_mutex);
- if (ret)
- goto out;
-
ret = btrfs_add_log_tree(trans, root);
if (ret)
goto out;
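
The restructuring above exists so the tree root's log_mutex is never taken while a subvolume root's log_mutex is held, and so the HAS_LOG_TREE bit keeps the common case lock-free. A self-contained sketch of that check-lock-recheck pattern using pthreads; the names mirror the patch but the bodies are illustrative only.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool has_log_tree;	/* stand-in for BTRFS_ROOT_HAS_LOG_TREE */
static void *log_root_tree;		/* stand-in for fs_info->log_root_tree */

static int create_log_root_tree(void)
{
	log_root_tree = &init_lock;	/* any non-NULL value will do here */
	return 0;
}

/*
 * Check-lock-recheck: the bit makes the common case lock-free, and the
 * recheck under the mutex resolves the race between two first callers.
 */
static int ensure_log_root_tree(void)
{
	int ret = 0;

	if (!atomic_load(&has_log_tree)) {
		pthread_mutex_lock(&init_lock);
		if (!log_root_tree) {
			ret = create_log_root_tree();
			if (!ret)
				atomic_store(&has_log_tree, true);
		}
		pthread_mutex_unlock(&init_lock);
	}
	return ret;
}

int main(void)
{
	return ensure_log_root_tree();
}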
@@ -172,7 +181,6 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
root->log_start_pid = current->pid;
}
- atomic_inc(&root->log_batch);
atomic_inc(&root->log_writers);
if (ctx && !ctx->logging_new_name) {
int index = root->log_transid % 2;
@@ -576,6 +584,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
struct extent_buffer *eb, int slot,
struct btrfs_key *key)
{
+ struct btrfs_drop_extents_args drop_args = { 0 };
struct btrfs_fs_info *fs_info = root->fs_info;
int found_type;
u64 extent_end;
@@ -653,7 +662,10 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
/* drop any overlapping extents */
- ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
+ drop_args.start = start;
+ drop_args.end = extent_end;
+ drop_args.drop_cache = true;
+ ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &drop_args);
if (ret)
goto out;
@@ -828,9 +840,9 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
if (ret)
goto out;
- inode_add_bytes(inode, nbytes);
update_inode:
- ret = btrfs_update_inode(trans, root, inode);
+ btrfs_update_inode_bytes(BTRFS_I(inode), nbytes, drop_args.bytes_found);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
out:
if (inode)
iput(inode);
@@ -1529,7 +1541,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
if (ret)
goto out;
- btrfs_update_inode(trans, root, inode);
+ btrfs_update_inode(trans, root, BTRFS_I(inode));
}
ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
@@ -1564,18 +1576,6 @@ out:
return ret;
}
-static int insert_orphan_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 ino)
-{
- int ret;
-
- ret = btrfs_insert_orphan_item(trans, root, ino);
- if (ret == -EEXIST)
- ret = 0;
-
- return ret;
-}
-
static int count_inode_extrefs(struct btrfs_root *root,
struct btrfs_inode *inode, struct btrfs_path *path)
{
@@ -1716,7 +1716,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
if (nlink != inode->i_nlink) {
set_nlink(inode, nlink);
- btrfs_update_inode(trans, root, inode);
+ btrfs_update_inode(trans, root, BTRFS_I(inode));
}
BTRFS_I(inode)->index_cnt = (u64)-1;
@@ -1727,7 +1727,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
if (ret)
goto out;
}
- ret = insert_orphan_item(trans, root, ino);
+ ret = btrfs_insert_orphan_item(trans, root, ino);
+ if (ret == -EEXIST)
+ ret = 0;
}
out:
@@ -1820,7 +1822,7 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
set_nlink(inode, 1);
else
inc_nlink(inode);
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
} else if (ret == -EEXIST) {
ret = 0;
} else {
@@ -1973,7 +1975,7 @@ out:
btrfs_release_path(path);
if (!ret && update_size) {
btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
- ret = btrfs_update_inode(trans, root, dir);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
}
kfree(name);
iput(dir);
@@ -2586,6 +2588,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
* those prealloc extents just after replaying them.
*/
if (S_ISREG(mode)) {
+ struct btrfs_drop_extents_args drop_args = { 0 };
struct inode *inode;
u64 from;
@@ -2596,12 +2599,18 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
}
from = ALIGN(i_size_read(inode),
root->fs_info->sectorsize);
- ret = btrfs_drop_extents(wc->trans, root, inode,
- from, (u64)-1, 1);
+ drop_args.start = from;
+ drop_args.end = (u64)-1;
+ drop_args.drop_cache = true;
+ ret = btrfs_drop_extents(wc->trans, root,
+ BTRFS_I(inode),
+ &drop_args);
if (!ret) {
+ inode_sub_bytes(inode,
+ drop_args.bytes_found);
/* Update the inode's nbytes. */
ret = btrfs_update_inode(wc->trans,
- root, inode);
+ root, BTRFS_I(inode));
}
iput(inode);
if (ret)
@@ -2709,7 +2718,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]);
blocksize = fs_info->nodesize;
- next = btrfs_find_create_tree_block(fs_info, bytenr);
+ next = btrfs_find_create_tree_block(fs_info, bytenr,
+ btrfs_header_owner(cur),
+ *level - 1);
if (IS_ERR(next))
return PTR_ERR(next);
@@ -2732,7 +2743,6 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
if (trans) {
btrfs_tree_lock(next);
- btrfs_set_lock_blocking_write(next);
btrfs_clean_tree_block(next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
@@ -2801,7 +2811,6 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
if (trans) {
btrfs_tree_lock(next);
- btrfs_set_lock_blocking_write(next);
btrfs_clean_tree_block(next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
@@ -2883,7 +2892,6 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
if (trans) {
btrfs_tree_lock(next);
- btrfs_set_lock_blocking_write(next);
btrfs_clean_tree_block(next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
@@ -3023,6 +3031,8 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
int log_transid = 0;
struct btrfs_log_ctx root_log_ctx;
struct blk_plug plug;
+ u64 log_root_start;
+ u64 log_root_level;
mutex_lock(&root->log_mutex);
log_transid = ctx->log_transid;
@@ -3200,22 +3210,31 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
goto out_wake_log_root;
}
- btrfs_set_super_log_root(fs_info->super_for_commit,
- log_root_tree->node->start);
- btrfs_set_super_log_root_level(fs_info->super_for_commit,
- btrfs_header_level(log_root_tree->node));
-
+ log_root_start = log_root_tree->node->start;
+ log_root_level = btrfs_header_level(log_root_tree->node);
log_root_tree->log_transid++;
mutex_unlock(&log_root_tree->log_mutex);
/*
- * Nobody else is going to jump in and write the ctree
- * super here because the log_commit atomic below is protecting
- * us. We must be called with a transaction handle pinning
- * the running transaction open, so a full commit can't hop
- * in and cause problems either.
+ * Here we are guaranteed that nobody is going to write the superblock
+ * for the current transaction before us, and that we will not write
+ * our superblock before the previous transaction finishes its commit
+ * and writes its superblock, because:
+ *
+ * 1) We are holding a handle on the current transaction, so nobody
+ * can commit it until we release the handle;
+ *
+ * 2) Before writing our superblock we acquire the tree_log_mutex, so
+ * if the previous transaction is still committing, and hasn't yet
+ * written its superblock, we wait for it to do it, because a
+ * transaction commit acquires the tree_log_mutex when the commit
+ * begins and releases it only after writing its superblock.
*/
+ mutex_lock(&fs_info->tree_log_mutex);
+ btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start);
+ btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level);
ret = write_all_supers(fs_info, 1);
+ mutex_unlock(&fs_info->tree_log_mutex);
if (ret) {
btrfs_set_log_full_commit(trans);
btrfs_abort_transaction(trans, ret);
@@ -3300,6 +3319,7 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
if (fs_info->log_root_tree) {
free_log_tree(trans, fs_info->log_root_tree);
fs_info->log_root_tree = NULL;
+ clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &fs_info->tree_root->state);
}
return 0;
}
@@ -4196,6 +4216,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_log_ctx *ctx)
{
+ struct btrfs_drop_extents_args drop_args = { 0 };
struct btrfs_root *log = root->log_root;
struct btrfs_file_extent_item *fi;
struct extent_buffer *leaf;
@@ -4204,19 +4225,21 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
u64 extent_offset = em->start - em->orig_start;
u64 block_len;
int ret;
- int extent_inserted = 0;
ret = log_extent_csums(trans, inode, log, em, ctx);
if (ret)
return ret;
- ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
- em->start + em->len, NULL, 0, 1,
- sizeof(*fi), &extent_inserted);
+ drop_args.path = path;
+ drop_args.start = em->start;
+ drop_args.end = em->start + em->len;
+ drop_args.replace_extent = true;
+ drop_args.extent_item_size = sizeof(*fi);
+ ret = btrfs_drop_extents(trans, log, inode, &drop_args);
if (ret)
return ret;
- if (!extent_inserted) {
+ if (!drop_args.extent_inserted) {
key.objectid = btrfs_ino(inode);
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = em->start;
@@ -4375,8 +4398,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
do {
ret = btrfs_truncate_inode_items(trans,
root->log_root,
- &inode->vfs_inode,
- truncate_offset,
+ inode, truncate_offset,
BTRFS_EXTENT_DATA_KEY);
} while (ret == -EAGAIN);
if (ret)
@@ -4415,14 +4437,12 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
struct extent_map *em, *n;
struct list_head extents;
struct extent_map_tree *tree = &inode->extent_tree;
- u64 test_gen;
int ret = 0;
int num = 0;
INIT_LIST_HEAD(&extents);
write_lock(&tree->lock);
- test_gen = root->fs_info->last_trans_committed;
list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
list_del_init(&em->list);
@@ -4438,7 +4458,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
goto process;
}
- if (em->generation <= test_gen)
+ if (em->generation < trans->transid)
continue;
/* We log prealloc extents beyond eof later. */
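[ The hunk above shows a pattern repeated throughout tree-log.c in this series: comparisons against fs_info->last_trans_committed become comparisons against trans->transid. A sketch of why the two agree, assuming the running transaction is N:

    /*
     * Everything already committed has generation <= N - 1, so
     *
     *     em->generation <= last_trans_committed    (== N - 1)
     *     em->generation <  trans->transid          (== N)
     *
     * select the same extent maps. The second form does not read
     * last_trans_committed, which may change concurrently while a
     * previous transaction is still committing.
     */ ]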
@@ -4571,6 +4591,10 @@ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
const u64 ino = btrfs_ino(inode);
int ins_nr = 0;
int start_slot = 0;
+ bool found_xattrs = false;
+
+ if (test_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags))
+ return 0;
key.objectid = ino;
key.type = BTRFS_XATTR_ITEM_KEY;
@@ -4609,6 +4633,7 @@ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
start_slot = slot;
ins_nr++;
path->slots[0]++;
+ found_xattrs = true;
cond_resched();
}
if (ins_nr > 0) {
@@ -4618,6 +4643,9 @@ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
return ret;
}
+ if (!found_xattrs)
+ set_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags);
+
return 0;
}
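[ Together with the fs/btrfs/xattr.c hunk later in this patch, the new flag forms a per-inode negative cache for xattr logging; a condensed sketch of the protocol (BTRFS_INODE_NO_XATTRS is the runtime flag added by this series):

    /* When logging: skip the xattr tree search entirely if an earlier
     * pass found no xattrs, and record a negative result otherwise. */
    if (test_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags))
        return 0;
    /* ... search the subvolume tree ... */
    if (!found_xattrs)
        set_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags);

    /* When an xattr is added, btrfs_setxattr() clears the bit so the
     * next log attempt searches again. */ ]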
@@ -5303,7 +5331,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
&inode->runtime_flags);
while(1) {
ret = btrfs_truncate_inode_items(trans,
- log, &inode->vfs_inode, 0, 0);
+ log, inode, 0, 0);
if (ret != -EAGAIN)
break;
}
@@ -5442,11 +5470,10 @@ out_unlock:
static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode)
{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
bool ret = false;
mutex_lock(&inode->log_mutex);
- if (inode->last_unlink_trans > fs_info->last_trans_committed) {
+ if (inode->last_unlink_trans >= trans->transid) {
/*
* Make sure any commits to the log are forced to be full
* commits.
@@ -5468,8 +5495,7 @@ static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode,
struct dentry *parent,
- struct super_block *sb,
- u64 last_committed)
+ struct super_block *sb)
{
int ret = 0;
struct dentry *old_parent = NULL;
@@ -5481,8 +5507,8 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
* and other fun in this file.
*/
if (S_ISREG(inode->vfs_inode.i_mode) &&
- inode->generation <= last_committed &&
- inode->last_unlink_trans <= last_committed)
+ inode->generation < trans->transid &&
+ inode->last_unlink_trans < trans->transid)
goto out;
if (!S_ISDIR(inode->vfs_inode.i_mode)) {
@@ -5828,7 +5854,6 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
while (true) {
struct btrfs_fs_info *fs_info = root->fs_info;
- const u64 last_committed = fs_info->last_trans_committed;
struct extent_buffer *leaf = path->nodes[0];
int slot = path->slots[0];
struct btrfs_key search_key;
@@ -5847,7 +5872,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
if (IS_ERR(inode))
return PTR_ERR(inode);
- if (BTRFS_I(inode)->generation > last_committed)
+ if (BTRFS_I(inode)->generation >= trans->transid)
ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
LOG_INODE_EXISTS, ctx);
btrfs_add_delayed_iput(inode);
@@ -5888,7 +5913,6 @@ static int log_new_ancestors_fast(struct btrfs_trans_handle *trans,
struct btrfs_log_ctx *ctx)
{
struct btrfs_root *root = inode->root;
- struct btrfs_fs_info *fs_info = root->fs_info;
struct dentry *old_parent = NULL;
struct super_block *sb = inode->vfs_inode.i_sb;
int ret = 0;
@@ -5902,7 +5926,7 @@ static int log_new_ancestors_fast(struct btrfs_trans_handle *trans,
if (root != inode->root)
break;
- if (inode->generation > fs_info->last_trans_committed) {
+ if (inode->generation >= trans->transid) {
ret = btrfs_log_inode(trans, root, inode,
LOG_INODE_EXISTS, ctx);
if (ret)
@@ -6019,7 +6043,6 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = root->fs_info;
struct super_block *sb;
int ret = 0;
- u64 last_committed = fs_info->last_trans_committed;
bool log_dentries = false;
sb = inode->vfs_inode.i_sb;
@@ -6029,23 +6052,12 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
goto end_no_trans;
}
- /*
- * The prev transaction commit doesn't complete, we need do
- * full commit by ourselves.
- */
- if (fs_info->last_trans_log_full_commit >
- fs_info->last_trans_committed) {
- ret = 1;
- goto end_no_trans;
- }
-
if (btrfs_root_refs(&root->root_item) == 0) {
ret = 1;
goto end_no_trans;
}
- ret = check_parent_dirs_for_sync(trans, inode, parent, sb,
- last_committed);
+ ret = check_parent_dirs_for_sync(trans, inode, parent, sb);
if (ret)
goto end_no_trans;
@@ -6075,8 +6087,8 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
* and other fun in this file.
*/
if (S_ISREG(inode->vfs_inode.i_mode) &&
- inode->generation <= last_committed &&
- inode->last_unlink_trans <= last_committed) {
+ inode->generation < trans->transid &&
+ inode->last_unlink_trans < trans->transid) {
ret = 0;
goto end_trans;
}
@@ -6125,7 +6137,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
* but the file inode does not have a matching BTRFS_INODE_REF_KEY item
* and has a link count of 2.
*/
- if (inode->last_unlink_trans > last_committed) {
+ if (inode->last_unlink_trans >= trans->transid) {
ret = btrfs_log_all_parents(trans, inode, ctx);
if (ret)
goto end_trans;
@@ -6434,7 +6446,6 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, struct btrfs_inode *old_dir,
struct dentry *parent)
{
- struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_log_ctx ctx;
/*
@@ -6448,8 +6459,8 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
* if this inode hasn't been logged and directory we're renaming it
* from hasn't been logged, we don't need to log it
*/
- if (inode->logged_trans <= fs_info->last_trans_committed &&
- (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
+ if (inode->logged_trans < trans->transid &&
+ (!old_dir || old_dir->logged_trans < trans->transid))
return;
btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index 28525ad7ff8c..74023c8a783f 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -129,8 +129,7 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
} else {
btrfs_warn(fs_info,
"insert uuid item failed %d (0x%016llx, 0x%016llx) type %u!",
- ret, (unsigned long long)key.objectid,
- (unsigned long long)key.offset, type);
+ ret, key.objectid, key.offset, type);
goto out;
}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 78637665166e..7930e1c78c45 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -31,6 +31,7 @@
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
+#include "zoned.h"
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
[BTRFS_RAID_RAID10] = {
@@ -374,6 +375,7 @@ void btrfs_free_device(struct btrfs_device *device)
rcu_string_free(device->name);
extent_io_tree_release(&device->alloc_state);
bio_put(device->flush_bio);
+ btrfs_destroy_dev_zone_info(device);
kfree(device);
}
@@ -667,6 +669,10 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
device->mode = flags;
+ ret = btrfs_get_dev_zone_info(device);
+ if (ret != 0)
+ goto error_free_page;
+
fs_devices->open_devices++;
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
device->devid != BTRFS_DEV_REPLACE_DEVID) {
@@ -822,7 +828,7 @@ static noinline struct btrfs_device *device_list_add(const char *path,
} else {
mutex_lock(&fs_devices->device_list_mutex);
device = btrfs_find_device(fs_devices, devid,
- disk_super->dev_item.uuid, NULL, false);
+ disk_super->dev_item.uuid, NULL);
/*
* If this disk has been pulled into an fs devices created by
@@ -1044,7 +1050,7 @@ error:
}
static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
- int step, struct btrfs_device **latest_dev)
+ struct btrfs_device **latest_dev)
{
struct btrfs_device *device, *next;
@@ -1089,16 +1095,16 @@ static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
* After we have read the system tree and know devids belonging to this
* filesystem, remove the device which does not belong there.
*/
-void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
+void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
struct btrfs_device *latest_dev = NULL;
struct btrfs_fs_devices *seed_dev;
mutex_lock(&uuid_mutex);
- __btrfs_free_extra_devids(fs_devices, step, &latest_dev);
+ __btrfs_free_extra_devids(fs_devices, &latest_dev);
list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
- __btrfs_free_extra_devids(seed_dev, step, &latest_dev);
+ __btrfs_free_extra_devids(seed_dev, &latest_dev);
fs_devices->latest_bdev = latest_dev->bdev;
@@ -1137,6 +1143,7 @@ static void btrfs_close_one_device(struct btrfs_device *device)
device->bdev = NULL;
}
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
+ btrfs_destroy_dev_zone_info(device);
device->fs_info = NULL;
atomic_set(&device->dev_stats_ccnt, 0);
@@ -1217,6 +1224,7 @@ static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
fs_devices->latest_bdev = latest_dev->bdev;
fs_devices->total_rw_bytes = 0;
fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
+ fs_devices->read_policy = BTRFS_READ_POLICY_PID;
return 0;
}
@@ -1268,7 +1276,7 @@ void btrfs_release_disk_super(struct btrfs_super_block *super)
}
static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
- u64 bytenr)
+ u64 bytenr, u64 bytenr_orig)
{
struct btrfs_super_block *disk_super;
struct page *page;
@@ -1299,7 +1307,7 @@ static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev
/* align our pointer to the offset of the super block */
disk_super = p + offset_in_page(bytenr);
- if (btrfs_super_bytenr(disk_super) != bytenr ||
+ if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
btrfs_release_disk_super(p);
return ERR_PTR(-EINVAL);
@@ -1334,7 +1342,8 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
bool new_device_added = false;
struct btrfs_device *device = NULL;
struct block_device *bdev;
- u64 bytenr;
+ u64 bytenr, bytenr_orig;
+ int ret;
lockdep_assert_held(&uuid_mutex);
@@ -1344,14 +1353,18 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
* So, we need to add a special mount option to scan for
* later supers, using BTRFS_SUPER_MIRROR_MAX instead
*/
- bytenr = btrfs_sb_offset(0);
flags |= FMODE_EXCL;
bdev = blkdev_get_by_path(path, flags, holder);
if (IS_ERR(bdev))
return ERR_CAST(bdev);
- disk_super = btrfs_read_disk_super(bdev, bytenr);
+ bytenr_orig = btrfs_sb_offset(0);
+ ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
+ if (ret)
+ return ERR_PTR(ret);
+
+ disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
if (IS_ERR(disk_super)) {
device = ERR_CAST(disk_super);
goto error_bdev_put;
@@ -2015,6 +2028,11 @@ void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
if (IS_ERR(disk_super))
continue;
+ if (bdev_is_zoned(bdev)) {
+ btrfs_reset_sb_log_zones(bdev, copy_num);
+ continue;
+ }
+
memset(&disk_super->magic, 0, sizeof(disk_super->magic));
page = virt_to_page(disk_super);
@@ -2293,10 +2311,10 @@ static struct btrfs_device *btrfs_find_device_by_path(
dev_uuid = disk_super->dev_item.uuid;
if (btrfs_fs_incompat(fs_info, METADATA_UUID))
device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
- disk_super->metadata_uuid, true);
+ disk_super->metadata_uuid);
else
device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
- disk_super->fsid, true);
+ disk_super->fsid);
btrfs_release_disk_super(disk_super);
if (!device)
@@ -2316,7 +2334,7 @@ struct btrfs_device *btrfs_find_device_by_devspec(
if (devid) {
device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
- NULL, true);
+ NULL);
if (!device)
return ERR_PTR(-ENOENT);
return device;
@@ -2465,7 +2483,7 @@ next_slot:
read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
BTRFS_FSID_SIZE);
device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
- fs_uuid, true);
+ fs_uuid);
BUG_ON(!device); /* Logic error */
if (device->fs_devices->seeding) {
@@ -2507,6 +2525,11 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
if (IS_ERR(bdev))
return PTR_ERR(bdev);
+ if (!btrfs_check_device_zone_type(fs_info, bdev)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
if (fs_devices->seeding) {
seeding_dev = 1;
down_write(&sb->s_umount);
@@ -2540,10 +2563,17 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
}
rcu_assign_pointer(device->name, name);
+ device->fs_info = fs_info;
+ device->bdev = bdev;
+
+ ret = btrfs_get_dev_zone_info(device);
+ if (ret)
+ goto error_free_device;
+
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
- goto error_free_device;
+ goto error_free_zone;
}
q = bdev_get_queue(bdev);
@@ -2556,8 +2586,6 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
fs_info->sectorsize);
device->disk_total_bytes = device->total_bytes;
device->commit_total_bytes = device->total_bytes;
- device->fs_info = fs_info;
- device->bdev = bdev;
set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
device->mode = FMODE_EXCL;
@@ -2704,6 +2732,8 @@ error_trans:
sb->s_flags |= SB_RDONLY;
if (trans)
btrfs_end_transaction(trans);
+error_free_zone:
+ btrfs_destroy_dev_zone_info(device);
error_free_device:
btrfs_free_device(device);
error:
@@ -5479,7 +5509,18 @@ static int find_live_mirror(struct btrfs_fs_info *fs_info,
else
num_stripes = map->num_stripes;
- preferred_mirror = first + current->pid % num_stripes;
+ switch (fs_info->fs_devices->read_policy) {
+ default:
+ /* Shouldn't happen, just warn and use pid instead of failing */
+ btrfs_warn_rl(fs_info,
+ "unknown read_policy type %u, reset to pid",
+ fs_info->fs_devices->read_policy);
+ fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID;
+ fallthrough;
+ case BTRFS_READ_POLICY_PID:
+ preferred_mirror = first + (current->pid % num_stripes);
+ break;
+ }
if (dev_replace_is_ongoing &&
fs_info->dev_replace.cont_reading_from_srcdev_mode ==
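[ A worked example of the PID policy, assuming a two-way RAID1 chunk (first == 0, num_stripes == 2):

    /* current->pid == 12345: 12345 % 2 == 1 -> preferred_mirror = 1
     * current->pid == 12346: 12346 % 2 == 0 -> preferred_mirror = 0
     * Concurrent readers thus spread across the mirrors with no shared
     * state to update on the read path. */ ]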
@@ -6335,7 +6376,7 @@ static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
bio->bi_iter.bi_sector = physical >> 9;
btrfs_debug_in_rcu(fs_info,
"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
- bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
+ bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
(unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
dev->devid, bio->bi_iter.bi_size);
bio_set_dev(bio, dev->bdev);
@@ -6367,7 +6408,7 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
{
struct btrfs_device *dev;
struct bio *first_bio = bio;
- u64 logical = (u64)bio->bi_iter.bi_sector << 9;
+ u64 logical = bio->bi_iter.bi_sector << 9;
u64 length = 0;
u64 map_length;
int ret;
@@ -6447,8 +6488,7 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
* If @seed is true, traverse through the seed devices.
*/
struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
- u64 devid, u8 *uuid, u8 *fsid,
- bool seed)
+ u64 devid, u8 *uuid, u8 *fsid)
{
struct btrfs_device *device;
struct btrfs_fs_devices *seed_devs;
@@ -6655,7 +6695,7 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
btrfs_stripe_dev_uuid_nr(chunk, i),
BTRFS_UUID_SIZE);
map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
- devid, uuid, NULL, true);
+ devid, uuid, NULL);
if (!map->stripes[i].dev &&
!btrfs_test_opt(fs_info, DEGRADED)) {
free_extent_map(em);
@@ -6794,7 +6834,7 @@ static int read_one_dev(struct extent_buffer *leaf,
}
device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
- fs_uuid, true);
+ fs_uuid);
if (!device) {
if (!btrfs_test_opt(fs_info, DEGRADED)) {
btrfs_report_missing_device(fs_info, devid,
@@ -6857,6 +6897,16 @@ static int read_one_dev(struct extent_buffer *leaf,
}
fill_device_from_item(leaf, dev_item, device);
+ if (device->bdev) {
+ u64 max_total_bytes = i_size_read(device->bdev->bd_inode);
+
+ if (device->total_bytes > max_total_bytes) {
+ btrfs_err(fs_info,
+ "device total_bytes should be at most %llu but found %llu",
+ max_total_bytes, device->total_bytes);
+ return -EINVAL;
+ }
+ }
set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
@@ -6891,11 +6941,11 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
* fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
* overallocate but we can keep it as-is, only the first page is used.
*/
- sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
+ sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET,
+ root->root_key.objectid, 0);
if (IS_ERR(sb))
return PTR_ERR(sb);
set_extent_buffer_uptodate(sb);
- btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
/*
* The sb extent buffer is artificial and just used to read the system array.
* set_extent_buffer_uptodate() call does not properly mark all it's
@@ -7059,12 +7109,8 @@ static void readahead_tree_node_children(struct extent_buffer *node)
int i;
const int nr_items = btrfs_header_nritems(node);
- for (i = 0; i < nr_items; i++) {
- u64 start;
-
- start = btrfs_node_blockptr(node, i);
- readahead_tree_block(node->fs_info, start);
- }
+ for (i = 0; i < nr_items; i++)
+ btrfs_readahead_node_child(node, i);
}
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
@@ -7451,8 +7497,7 @@ int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
int i;
mutex_lock(&fs_devices->device_list_mutex);
- dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
- true);
+ dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL);
mutex_unlock(&fs_devices->device_list_mutex);
if (!dev) {
@@ -7583,28 +7628,13 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
}
/* Make sure no dev extent is beyond device boundary */
- dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
+ dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);
if (!dev) {
btrfs_err(fs_info, "failed to find devid %llu", devid);
ret = -EUCLEAN;
goto out;
}
- /* It's possible this device is a dummy for seed device */
- if (dev->disk_total_bytes == 0) {
- struct btrfs_fs_devices *devs;
-
- devs = list_first_entry(&fs_info->fs_devices->seed_list,
- struct btrfs_fs_devices, seed_list);
- dev = btrfs_find_device(devs, devid, NULL, NULL, false);
- if (!dev) {
- btrfs_err(fs_info, "failed to find seed devid %llu",
- devid);
- ret = -EUCLEAN;
- goto out;
- }
- }
-
if (physical_offset + physical_len > dev->disk_total_bytes) {
btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
@@ -7659,6 +7689,19 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
u64 prev_dev_ext_end = 0;
int ret = 0;
+ /*
+ * We don't have a dev_root because we mounted with ignorebadroots and
+ * failed to load the root, so we want to skip the verification in this
+ * case for sure.
+ *
+ * However if the dev root is fine, but the tree itself is corrupted
+ * we'd still fail to mount. This verification is only to make sure
+ * writes can happen safely, so instead just bypass this check
+ * completely in the case of IGNOREBADROOTS.
+ */
+ if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
+ return 0;
+
key.objectid = 1;
key.type = BTRFS_DEV_EXTENT_KEY;
key.offset = 0;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 232f02bd214f..1997a4649a66 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -52,6 +52,8 @@ struct btrfs_io_geometry {
#define BTRFS_DEV_STATE_FLUSH_SENT (4)
#define BTRFS_DEV_STATE_NO_READA (5)
+struct btrfs_zoned_device_info;
+
struct btrfs_device {
struct list_head dev_list; /* device_list_mutex */
struct list_head dev_alloc_list; /* chunk mutex */
@@ -65,6 +67,8 @@ struct btrfs_device {
struct block_device *bdev;
+ struct btrfs_zoned_device_info *zone_info;
+
/* the mode sent to blkdev_get */
fmode_t mode;
@@ -211,6 +215,16 @@ enum btrfs_chunk_allocation_policy {
BTRFS_CHUNK_ALLOC_REGULAR,
};
+/*
+ * Read policies for mirrored block group profiles, read picks the stripe based
+ * on these policies.
+ */
+enum btrfs_read_policy {
+ /* Use process PID to choose the stripe */
+ BTRFS_READ_POLICY_PID,
+ BTRFS_NR_READ_POLICY,
+};
+
struct btrfs_fs_devices {
u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
u8 metadata_uuid[BTRFS_FSID_SIZE];
@@ -264,6 +278,9 @@ struct btrfs_fs_devices {
struct completion kobj_unregister;
enum btrfs_chunk_allocation_policy chunk_alloc_policy;
+
+ /* Policy used to read the mirrored stripes */
+ enum btrfs_read_policy read_policy;
};
#define BTRFS_BIO_INLINE_CSUM_SIZE 64
@@ -436,7 +453,7 @@ struct btrfs_device *btrfs_scan_one_device(const char *path,
fmode_t flags, void *holder);
int btrfs_forget_devices(const char *path);
void btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
-void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step);
+void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices);
void btrfs_assign_next_active_device(struct btrfs_device *device,
struct btrfs_device *this_dev);
struct btrfs_device *btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info,
@@ -453,7 +470,7 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 new_size);
struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
- u64 devid, u8 *uuid, u8 *fsid, bool seed);
+ u64 devid, u8 *uuid, u8 *fsid);
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path);
int btrfs_balance(struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 95d9aebff2c4..af6246f36a9e 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -213,9 +213,11 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
}
out:
btrfs_free_path(path);
- if (!ret)
+ if (!ret) {
set_bit(BTRFS_INODE_COPY_EVERYTHING,
&BTRFS_I(inode)->runtime_flags);
+ clear_bit(BTRFS_INODE_NO_XATTRS, &BTRFS_I(inode)->runtime_flags);
+ }
return ret;
}
@@ -239,7 +241,7 @@ int btrfs_setxattr_trans(struct inode *inode, const char *name,
inode_inc_iversion(inode);
inode->i_ctime = current_time(inode);
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
BUG_ON(ret);
out:
btrfs_end_transaction(trans);
@@ -390,7 +392,7 @@ static int btrfs_xattr_handler_set_prop(const struct xattr_handler *handler,
if (!ret) {
inode_inc_iversion(inode);
inode->i_ctime = current_time(inode);
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
BUG_ON(ret);
}
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
new file mode 100644
index 000000000000..155545180046
--- /dev/null
+++ b/fs/btrfs/zoned.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include "ctree.h"
+#include "volumes.h"
+#include "zoned.h"
+#include "rcu-string.h"
+
+/* Maximum number of zones to report per blkdev_report_zones() call */
+#define BTRFS_REPORT_NR_ZONES 4096
+
+/* Number of superblock log zones */
+#define BTRFS_NR_SB_LOG_ZONES 2
+
+static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
+{
+ struct blk_zone *zones = data;
+
+ memcpy(&zones[idx], zone, sizeof(*zone));
+
+ return 0;
+}
+
+static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
+ u64 *wp_ret)
+{
+ bool empty[BTRFS_NR_SB_LOG_ZONES];
+ bool full[BTRFS_NR_SB_LOG_ZONES];
+ sector_t sector;
+
+ ASSERT(zones[0].type != BLK_ZONE_TYPE_CONVENTIONAL &&
+ zones[1].type != BLK_ZONE_TYPE_CONVENTIONAL);
+
+ empty[0] = (zones[0].cond == BLK_ZONE_COND_EMPTY);
+ empty[1] = (zones[1].cond == BLK_ZONE_COND_EMPTY);
+ full[0] = (zones[0].cond == BLK_ZONE_COND_FULL);
+ full[1] = (zones[1].cond == BLK_ZONE_COND_FULL);
+
+ /*
+ * Possible states of log buffer zones
+ *
+ * Empty[0] In use[0] Full[0]
+ * Empty[1] * x 0
+ * In use[1] 0 x 0
+ * Full[1] 1 1 C
+ *
+ * Log position:
+ * *: Special case, no superblock is written
+ * 0: Use write pointer of zones[0]
+ * 1: Use write pointer of zones[1]
+ * C: Compare super blocks from zones[0] and zones[1], use the latest
+ * one determined by generation
+ * x: Invalid state
+ */
+
+ if (empty[0] && empty[1]) {
+ /* Special case to distinguish no superblock to read */
+ *wp_ret = zones[0].start << SECTOR_SHIFT;
+ return -ENOENT;
+ } else if (full[0] && full[1]) {
+ /* Compare two super blocks */
+ struct address_space *mapping = bdev->bd_inode->i_mapping;
+ struct page *page[BTRFS_NR_SB_LOG_ZONES];
+ struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
+ int i;
+
+ for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
+ u64 bytenr;
+
+ bytenr = ((zones[i].start + zones[i].len)
+ << SECTOR_SHIFT) - BTRFS_SUPER_INFO_SIZE;
+
+ page[i] = read_cache_page_gfp(mapping,
+ bytenr >> PAGE_SHIFT, GFP_NOFS);
+ if (IS_ERR(page[i])) {
+ if (i == 1)
+ btrfs_release_disk_super(super[0]);
+ return PTR_ERR(page[i]);
+ }
+ super[i] = page_address(page[i]);
+ }
+
+ if (super[0]->generation > super[1]->generation)
+ sector = zones[1].start;
+ else
+ sector = zones[0].start;
+
+ for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
+ btrfs_release_disk_super(super[i]);
+ } else if (!full[0] && (empty[1] || full[1])) {
+ sector = zones[0].wp;
+ } else if (full[0]) {
+ sector = zones[1].wp;
+ } else {
+ return -EUCLEAN;
+ }
+ *wp_ret = sector << SECTOR_SHIFT;
+ return 0;
+}
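[ To make the state table concrete, a hypothetical call where the first log zone has filled up and the second is partially written (values chosen for illustration only):

    struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES] = {
        { .cond = BLK_ZONE_COND_FULL },                 /* Full[0] */
        { .cond = BLK_ZONE_COND_IMP_OPEN, .wp = 2048 }, /* In use[1] */
    };
    u64 wp;
    int ret = sb_write_pointer(bdev, zones, &wp);
    /* Hits the "full[0]" branch: ret == 0 and wp == 2048 << SECTOR_SHIFT,
     * so the next superblock goes to the write pointer of zones[1]. */ ]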
+
+/*
+ * The following zones are reserved as the circular buffer on ZONED btrfs.
+ * - The primary superblock: zones 0 and 1
+ * - The first copy: zones 16 and 17
+ * - The second copy: zone 1024, or the zone containing offset 256GB,
+ * whichever is lower, and the following one
+ */
+static inline u32 sb_zone_number(int shift, int mirror)
+{
+ ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
+
+ switch (mirror) {
+ case 0: return 0;
+ case 1: return 16;
+ case 2: return min_t(u64, btrfs_sb_offset(mirror) >> shift, 1024);
+ }
+
+ return 0;
+}
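[ A worked example for mirror 2, where btrfs_sb_offset(2) is the regular 256 GiB superblock offset:

    /* 256 MiB zones: shift == 28, 256G >> 28 == 1024,
     *   min(1024, 1024) -> zone 1024 (starting exactly at 256 GiB).
     * 1 GiB zones:    shift == 30, 256G >> 30 == 256,
     *   min(256, 1024) -> zone 256 (the zone containing 256 GiB). */ ]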
+
+static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
+ struct blk_zone *zones, unsigned int *nr_zones)
+{
+ int ret;
+
+ if (!*nr_zones)
+ return 0;
+
+ ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
+ copy_zone_info_cb, zones);
+ if (ret < 0) {
+ btrfs_err_in_rcu(device->fs_info,
+ "zoned: failed to read zone %llu on %s (devid %llu)",
+ pos, rcu_str_deref(device->name),
+ device->devid);
+ return ret;
+ }
+ *nr_zones = ret;
+ if (!ret)
+ return -EIO;
+
+ return 0;
+}
+
+int btrfs_get_dev_zone_info(struct btrfs_device *device)
+{
+ struct btrfs_zoned_device_info *zone_info = NULL;
+ struct block_device *bdev = device->bdev;
+ struct request_queue *queue = bdev_get_queue(bdev);
+ sector_t nr_sectors;
+ sector_t sector = 0;
+ struct blk_zone *zones = NULL;
+ unsigned int i, nreported = 0, nr_zones;
+ unsigned int zone_sectors;
+ int ret;
+
+ if (!bdev_is_zoned(bdev))
+ return 0;
+
+ if (device->zone_info)
+ return 0;
+
+ zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
+ if (!zone_info)
+ return -ENOMEM;
+
+ nr_sectors = bdev->bd_part->nr_sects;
+ zone_sectors = bdev_zone_sectors(bdev);
+ /* Check if it's power of 2 (see is_power_of_2) */
+ ASSERT(zone_sectors != 0 && (zone_sectors & (zone_sectors - 1)) == 0);
+ zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
+ zone_info->zone_size_shift = ilog2(zone_info->zone_size);
+ zone_info->max_zone_append_size =
+ (u64)queue_max_zone_append_sectors(queue) << SECTOR_SHIFT;
+ zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
+ if (!IS_ALIGNED(nr_sectors, zone_sectors))
+ zone_info->nr_zones++;
+
+ zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
+ if (!zone_info->seq_zones) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
+ if (!zone_info->empty_zones) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ zones = kcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
+ if (!zones) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Get zone types */
+ while (sector < nr_sectors) {
+ nr_zones = BTRFS_REPORT_NR_ZONES;
+ ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
+ &nr_zones);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < nr_zones; i++) {
+ if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
+ __set_bit(nreported, zone_info->seq_zones);
+ if (zones[i].cond == BLK_ZONE_COND_EMPTY)
+ __set_bit(nreported, zone_info->empty_zones);
+ nreported++;
+ }
+ sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
+ }
+
+ if (nreported != zone_info->nr_zones) {
+ btrfs_err_in_rcu(device->fs_info,
+ "inconsistent number of zones on %s (%u/%u)",
+ rcu_str_deref(device->name), nreported,
+ zone_info->nr_zones);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Validate superblock log */
+ nr_zones = BTRFS_NR_SB_LOG_ZONES;
+ for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+ u32 sb_zone;
+ u64 sb_wp;
+ int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;
+
+ sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
+ if (sb_zone + 1 >= zone_info->nr_zones)
+ continue;
+
+ sector = sb_zone << (zone_info->zone_size_shift - SECTOR_SHIFT);
+ ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT,
+ &zone_info->sb_zones[sb_pos],
+ &nr_zones);
+ if (ret)
+ goto out;
+
+ if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
+ btrfs_err_in_rcu(device->fs_info,
+ "zoned: failed to read super block log zone info at devid %llu zone %u",
+ device->devid, sb_zone);
+ ret = -EUCLEAN;
+ goto out;
+ }
+
+ /*
+ * If zones[0] is conventional, always use the beginning of the
+ * zone to record superblock. No need to validate in that case.
+ */
+ if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
+ BLK_ZONE_TYPE_CONVENTIONAL)
+ continue;
+
+ ret = sb_write_pointer(device->bdev,
+ &zone_info->sb_zones[sb_pos], &sb_wp);
+ if (ret != -ENOENT && ret) {
+ btrfs_err_in_rcu(device->fs_info,
+ "zoned: super block log zone corrupted devid %llu zone %u",
+ device->devid, sb_zone);
+ ret = -EUCLEAN;
+ goto out;
+ }
+ }
+
+ kfree(zones);
+
+ device->zone_info = zone_info;
+
+ /* device->fs_info is not safe to use for printing messages */
+ btrfs_info_in_rcu(NULL,
+ "host-%s zoned block device %s, %u zones of %llu bytes",
+ bdev_zoned_model(bdev) == BLK_ZONED_HM ? "managed" : "aware",
+ rcu_str_deref(device->name), zone_info->nr_zones,
+ zone_info->zone_size);
+
+ return 0;
+
+out:
+ kfree(zones);
+ bitmap_free(zone_info->empty_zones);
+ bitmap_free(zone_info->seq_zones);
+ kfree(zone_info);
+
+ return ret;
+}
+
+void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
+{
+ struct btrfs_zoned_device_info *zone_info = device->zone_info;
+
+ if (!zone_info)
+ return;
+
+ bitmap_free(zone_info->seq_zones);
+ bitmap_free(zone_info->empty_zones);
+ kfree(zone_info);
+ device->zone_info = NULL;
+}
+
+int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
+ struct blk_zone *zone)
+{
+ unsigned int nr_zones = 1;
+ int ret;
+
+ ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
+ if (ret != 0 || !nr_zones)
+ return ret ? ret : -EIO;
+
+ return 0;
+}
+
+int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ struct btrfs_device *device;
+ u64 zoned_devices = 0;
+ u64 nr_devices = 0;
+ u64 zone_size = 0;
+ u64 max_zone_append_size = 0;
+ const bool incompat_zoned = btrfs_is_zoned(fs_info);
+ int ret = 0;
+
+ /* Count zoned devices */
+ list_for_each_entry(device, &fs_devices->devices, dev_list) {
+ enum blk_zoned_model model;
+
+ if (!device->bdev)
+ continue;
+
+ model = bdev_zoned_model(device->bdev);
+ if (model == BLK_ZONED_HM ||
+ (model == BLK_ZONED_HA && incompat_zoned)) {
+ struct btrfs_zoned_device_info *zone_info;
+
+ zone_info = device->zone_info;
+ zoned_devices++;
+ if (!zone_size) {
+ zone_size = zone_info->zone_size;
+ } else if (zone_info->zone_size != zone_size) {
+ btrfs_err(fs_info,
+ "zoned: unequal block device zone sizes: have %llu found %llu",
+ zone_size,
+ device->zone_info->zone_size);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!max_zone_append_size ||
+ (zone_info->max_zone_append_size &&
+ zone_info->max_zone_append_size < max_zone_append_size))
+ max_zone_append_size =
+ zone_info->max_zone_append_size;
+ }
+ nr_devices++;
+ }
+
+ if (!zoned_devices && !incompat_zoned)
+ goto out;
+
+ if (!zoned_devices && incompat_zoned) {
+ /* No zoned block device found on ZONED filesystem */
+ btrfs_err(fs_info,
+ "zoned: no zoned devices found on a zoned filesystem");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (zoned_devices && !incompat_zoned) {
+ btrfs_err(fs_info,
+ "zoned: mode not enabled but zoned device found");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (zoned_devices != nr_devices) {
+ btrfs_err(fs_info,
+ "zoned: cannot mix zoned and regular devices");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * stripe_size is always aligned to BTRFS_STRIPE_LEN in
+ * __btrfs_alloc_chunk(). Since we want stripe_len == zone_size,
+ * check the alignment here.
+ */
+ if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
+ btrfs_err(fs_info,
+ "zoned: zone size %llu not aligned to stripe %u",
+ zone_size, BTRFS_STRIPE_LEN);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
+ btrfs_err(fs_info, "zoned: mixed block groups not supported");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ fs_info->zone_size = zone_size;
+ fs_info->max_zone_append_size = max_zone_append_size;
+
+ btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
+out:
+ return ret;
+}
+
+int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
+{
+ if (!btrfs_is_zoned(info))
+ return 0;
+
+ /*
+ * Space cache writing is not COWed. Disable that to avoid write errors
+ * in sequential zones.
+ */
+ if (btrfs_test_opt(info, SPACE_CACHE)) {
+ btrfs_err(info, "zoned: space cache v1 is not supported");
+ return -EINVAL;
+ }
+
+ if (btrfs_test_opt(info, NODATACOW)) {
+ btrfs_err(info, "zoned: NODATACOW not supported");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
+ int rw, u64 *bytenr_ret)
+{
+ u64 wp;
+ int ret;
+
+ if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
+ *bytenr_ret = zones[0].start << SECTOR_SHIFT;
+ return 0;
+ }
+
+ ret = sb_write_pointer(bdev, zones, &wp);
+ if (ret != -ENOENT && ret < 0)
+ return ret;
+
+ if (rw == WRITE) {
+ struct blk_zone *reset = NULL;
+
+ if (wp == zones[0].start << SECTOR_SHIFT)
+ reset = &zones[0];
+ else if (wp == zones[1].start << SECTOR_SHIFT)
+ reset = &zones[1];
+
+ if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
+ ASSERT(reset->cond == BLK_ZONE_COND_FULL);
+
+ ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
+ reset->start, reset->len,
+ GFP_NOFS);
+ if (ret)
+ return ret;
+
+ reset->cond = BLK_ZONE_COND_EMPTY;
+ reset->wp = reset->start;
+ }
+ } else if (ret != -ENOENT) {
+ /* For READ, we want the previous one */
+ if (wp == zones[0].start << SECTOR_SHIFT)
+ wp = (zones[1].start + zones[1].len) << SECTOR_SHIFT;
+ wp -= BTRFS_SUPER_INFO_SIZE;
+ }
+
+ *bytenr_ret = wp;
+ return 0;
+}
+
+int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
+ u64 *bytenr_ret)
+{
+ struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
+ unsigned int zone_sectors;
+ u32 sb_zone;
+ int ret;
+ u64 zone_size;
+ u8 zone_sectors_shift;
+ sector_t nr_sectors;
+ u32 nr_zones;
+
+ if (!bdev_is_zoned(bdev)) {
+ *bytenr_ret = btrfs_sb_offset(mirror);
+ return 0;
+ }
+
+ ASSERT(rw == READ || rw == WRITE);
+
+ zone_sectors = bdev_zone_sectors(bdev);
+ if (!is_power_of_2(zone_sectors))
+ return -EINVAL;
+ zone_size = zone_sectors << SECTOR_SHIFT;
+ zone_sectors_shift = ilog2(zone_sectors);
+ nr_sectors = bdev->bd_part->nr_sects;
+ nr_zones = nr_sectors >> zone_sectors_shift;
+
+ sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
+ if (sb_zone + 1 >= nr_zones)
+ return -ENOENT;
+
+ ret = blkdev_report_zones(bdev, sb_zone << zone_sectors_shift,
+ BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
+ zones);
+ if (ret < 0)
+ return ret;
+ if (ret != BTRFS_NR_SB_LOG_ZONES)
+ return -EIO;
+
+ return sb_log_location(bdev, zones, rw, bytenr_ret);
+}
+
+int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
+ u64 *bytenr_ret)
+{
+ struct btrfs_zoned_device_info *zinfo = device->zone_info;
+ u32 zone_num;
+
+ if (!zinfo) {
+ *bytenr_ret = btrfs_sb_offset(mirror);
+ return 0;
+ }
+
+ zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
+ if (zone_num + 1 >= zinfo->nr_zones)
+ return -ENOENT;
+
+ return sb_log_location(device->bdev,
+ &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
+ rw, bytenr_ret);
+}
+
+static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
+ int mirror)
+{
+ u32 zone_num;
+
+ if (!zinfo)
+ return false;
+
+ zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
+ if (zone_num + 1 >= zinfo->nr_zones)
+ return false;
+
+ if (!test_bit(zone_num, zinfo->seq_zones))
+ return false;
+
+ return true;
+}
+
+void btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
+{
+ struct btrfs_zoned_device_info *zinfo = device->zone_info;
+ struct blk_zone *zone;
+
+ if (!is_sb_log_zone(zinfo, mirror))
+ return;
+
+ zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
+ if (zone->cond != BLK_ZONE_COND_FULL) {
+ if (zone->cond == BLK_ZONE_COND_EMPTY)
+ zone->cond = BLK_ZONE_COND_IMP_OPEN;
+
+ zone->wp += (BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT);
+
+ if (zone->wp == zone->start + zone->len)
+ zone->cond = BLK_ZONE_COND_FULL;
+
+ return;
+ }
+
+ zone++;
+ ASSERT(zone->cond != BLK_ZONE_COND_FULL);
+ if (zone->cond == BLK_ZONE_COND_EMPTY)
+ zone->cond = BLK_ZONE_COND_IMP_OPEN;
+
+ zone->wp += (BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT);
+
+ if (zone->wp == zone->start + zone->len)
+ zone->cond = BLK_ZONE_COND_FULL;
+}
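[ For scale: BTRFS_SUPER_INFO_SIZE is 4 KiB, i.e. 8 sectors, so with 256 MiB zones (an illustrative size):

    /* Each superblock write advances wp by 8 sectors; one zone holds
     * 256M / 4K == 65536 superblock generations before it goes FULL
     * and writes roll over to the second zone of the pair. */ ]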
+
+int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
+{
+ sector_t zone_sectors;
+ sector_t nr_sectors;
+ u8 zone_sectors_shift;
+ u32 sb_zone;
+ u32 nr_zones;
+
+ zone_sectors = bdev_zone_sectors(bdev);
+ zone_sectors_shift = ilog2(zone_sectors);
+ nr_sectors = bdev->bd_part->nr_sects;
+ nr_zones = nr_sectors >> zone_sectors_shift;
+
+ sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
+ if (sb_zone + 1 >= nr_zones)
+ return -ENOENT;
+
+ return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
+ sb_zone << zone_sectors_shift,
+ zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
+}
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
new file mode 100644
index 000000000000..8abe2f83272b
--- /dev/null
+++ b/fs/btrfs/zoned.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_ZONED_H
+#define BTRFS_ZONED_H
+
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include "volumes.h"
+#include "disk-io.h"
+
+struct btrfs_zoned_device_info {
+ /*
+ * Number of zones, zone size and types of zones if bdev is a
+ * zoned block device.
+ */
+ u64 zone_size;
+ u8 zone_size_shift;
+ u64 max_zone_append_size;
+ u32 nr_zones;
+ unsigned long *seq_zones;
+ unsigned long *empty_zones;
+ struct blk_zone sb_zones[2 * BTRFS_SUPER_MIRROR_MAX];
+};
+
+#ifdef CONFIG_BLK_DEV_ZONED
+int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
+ struct blk_zone *zone);
+int btrfs_get_dev_zone_info(struct btrfs_device *device);
+void btrfs_destroy_dev_zone_info(struct btrfs_device *device);
+int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info);
+int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info);
+int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
+ u64 *bytenr_ret);
+int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
+ u64 *bytenr_ret);
+void btrfs_advance_sb_log(struct btrfs_device *device, int mirror);
+int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror);
+#else /* CONFIG_BLK_DEV_ZONED */
+static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
+ struct blk_zone *zone)
+{
+ return 0;
+}
+
+static inline int btrfs_get_dev_zone_info(struct btrfs_device *device)
+{
+ return 0;
+}
+
+static inline void btrfs_destroy_dev_zone_info(struct btrfs_device *device) { }
+
+static inline int btrfs_check_zoned_mode(const struct btrfs_fs_info *fs_info)
+{
+ if (!btrfs_is_zoned(fs_info))
+ return 0;
+
+ btrfs_err(fs_info, "zoned block device support is not enabled");
+ return -EOPNOTSUPP;
+}
+
+static inline int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
+{
+ return 0;
+}
+
+static inline int btrfs_sb_log_location_bdev(struct block_device *bdev,
+ int mirror, int rw, u64 *bytenr_ret)
+{
+ *bytenr_ret = btrfs_sb_offset(mirror);
+ return 0;
+}
+
+static inline int btrfs_sb_log_location(struct btrfs_device *device, int mirror,
+ int rw, u64 *bytenr_ret)
+{
+ *bytenr_ret = btrfs_sb_offset(mirror);
+ return 0;
+}
+
+static inline void btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
+{ }
+
+static inline int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
+{
+ return 0;
+}
+
+#endif
+
+static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
+{
+ struct btrfs_zoned_device_info *zone_info = device->zone_info;
+
+ if (!zone_info)
+ return false;
+
+ return test_bit(pos >> zone_info->zone_size_shift, zone_info->seq_zones);
+}
+
+static inline bool btrfs_dev_is_empty_zone(struct btrfs_device *device, u64 pos)
+{
+ struct btrfs_zoned_device_info *zone_info = device->zone_info;
+
+ if (!zone_info)
+ return true;
+
+ return test_bit(pos >> zone_info->zone_size_shift, zone_info->empty_zones);
+}
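[ Both helpers reduce to one bitmap test; for example, with 256 MiB zones (zone_size_shift == 28):

    /* pos == 1 GiB: 0x40000000 >> 28 == 4, so the lookup is simply
     * test_bit(4, zone_info->seq_zones) (or ->empty_zones). */ ]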
+
+static inline void btrfs_dev_set_empty_zone_bit(struct btrfs_device *device,
+ u64 pos, bool set)
+{
+ struct btrfs_zoned_device_info *zone_info = device->zone_info;
+ unsigned int zno;
+
+ if (!zone_info)
+ return;
+
+ zno = pos >> zone_info->zone_size_shift;
+ if (set)
+ set_bit(zno, zone_info->empty_zones);
+ else
+ clear_bit(zno, zone_info->empty_zones);
+}
+
+static inline void btrfs_dev_set_zone_empty(struct btrfs_device *device, u64 pos)
+{
+ btrfs_dev_set_empty_zone_bit(device, pos, true);
+}
+
+static inline void btrfs_dev_clear_zone_empty(struct btrfs_device *device, u64 pos)
+{
+ btrfs_dev_set_empty_zone_bit(device, pos, false);
+}
+
+static inline bool btrfs_check_device_zone_type(const struct btrfs_fs_info *fs_info,
+ struct block_device *bdev)
+{
+ u64 zone_size;
+
+ if (btrfs_is_zoned(fs_info)) {
+ zone_size = bdev_zone_sectors(bdev) << SECTOR_SHIFT;
+ /* Do not allow non-zoned device */
+ return bdev_is_zoned(bdev) && fs_info->zone_size == zone_size;
+ }
+
+ /* Do not allow Host Managed zoned device */
+ return bdev_zoned_model(bdev) != BLK_ZONED_HM;
+}
+
+static inline bool btrfs_check_super_location(struct btrfs_device *device, u64 pos)
+{
+ /*
+ * On a non-zoned device, any address is OK. On a zoned device, only
+ * non-SEQUENTIAL WRITE REQUIRED zones can hold a superblock.
+ */
+ return device->zone_info == NULL || !btrfs_dev_is_sequential(device, pos);
+}
+
+#endif