From 424eb881300467a21a108d04c9dd08a6f8c007dc Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Mon, 25 Mar 2019 15:10:15 -0400
Subject: bcachefs: Only get btree iters from btree transactions

Signed-off-by: Kent Overstreet
---
 fs/bcachefs/alloc_background.c | 16 ++--
 fs/bcachefs/btree_gc.c | 34 +++++----
 fs/bcachefs/btree_io.c | 18 +++--
 fs/bcachefs/btree_iter.c | 63 ++++++++++++----
 fs/bcachefs/btree_iter.h | 53 ++++++-------
 fs/bcachefs/btree_update_leaf.c | 2 +-
 fs/bcachefs/chardev.c | 4 +-
 fs/bcachefs/debug.c | 41 ++++++----
 fs/bcachefs/dirent.c | 19 +++--
 fs/bcachefs/ec.c | 39 ++++++----
 fs/bcachefs/extents.c | 11 ++-
 fs/bcachefs/fs-io.c | 59 +++++++++------
 fs/bcachefs/fs.c | 11 ++-
 fs/bcachefs/fsck.c | 146 ++++++++++++++++++------------------
 fs/bcachefs/fsck.h | 1 -
 fs/bcachefs/inode.c | 12 +--
 fs/bcachefs/io.c | 57 ++++++++------
 fs/bcachefs/journal_seq_blacklist.c | 17 +++--
 fs/bcachefs/migrate.c | 22 +++---
 fs/bcachefs/move.c | 72 ++++++++++++------
 fs/bcachefs/move_types.h | 3 +-
 fs/bcachefs/quota.c | 18 +++--
 fs/bcachefs/rebalance.c | 4 +-
 fs/bcachefs/str_hash.h | 46 ++++++++----
 fs/bcachefs/sysfs.c | 9 ++-
 fs/bcachefs/tests.c | 121 ++++++++++++++++++------------
 fs/bcachefs/xattr.c | 10 ++-
 27 files changed, 550 insertions(+), 358 deletions(-)

diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index da25a1ed5206..436eb1e1ab07 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -264,18 +264,21 @@ static void bch2_alloc_read_key(struct bch_fs *c, struct bkey_s_c k)
 int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
 {
 	struct journal_replay *r;
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
 	struct bkey_s_c k;
 	struct bch_dev *ca;
 	unsigned i;
 	int ret;
 
-	for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS_MIN, 0, k) {
+	bch2_trans_init(&trans, c);
+
+	for_each_btree_key(&trans, iter, BTREE_ID_ALLOC, POS_MIN, 0, k) {
 		bch2_alloc_read_key(c, k);
-		bch2_btree_iter_cond_resched(&iter);
+		bch2_trans_cond_resched(&trans);
 	}
 
-	ret = bch2_btree_iter_unlock(&iter);
+	ret = bch2_trans_exit(&trans);
 	if (ret)
 		return ret;
 
@@ -391,8 +394,6 @@ static int __bch2_alloc_write_key(struct btree_trans *trans, struct bch_dev *ca,
 	__alloc_write_key(a, g, m);
 	percpu_up_read(&c->mark_lock);
 
-	bch2_btree_iter_cond_resched(iter);
-
 	bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &a->k_i));
 
 	ret = bch2_trans_commit(trans, NULL, journal_seq,
@@ -450,6 +451,7 @@ int bch2_alloc_write(struct bch_fs *c, bool nowait, bool *wrote)
 			if (ret)
 				break;
 
+			bch2_trans_cond_resched(&trans);
 			*wrote = true;
 		}
 		up_read(&ca->bucket_lock);
@@ -938,8 +940,6 @@ static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
 	spin_unlock(&c->freelist_lock);
 	percpu_up_read(&c->mark_lock);
 
-	bch2_btree_iter_cond_resched(iter);
-
 	BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);
 
 	bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 302793d84b92..aa8ac7d661ee 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -207,13 +207,16 @@ static int btree_gc_mark_node(struct bch_fs *c, struct btree *b,
 static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
 			 bool initial)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
 	struct btree *b;
 	struct range_checks r;
 	unsigned depth = btree_node_type_needs_gc(btree_id) ?
0 : 1; u8 max_stale; int ret = 0; + bch2_trans_init(&trans, c); + gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0)); /* @@ -227,7 +230,7 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id, btree_node_range_checks_init(&r, depth); - __for_each_btree_node(&iter, c, btree_id, POS_MIN, + __for_each_btree_node(&trans, iter, btree_id, POS_MIN, 0, depth, BTREE_ITER_PREFETCH, b) { btree_node_range_checks(c, b, &r); @@ -241,22 +244,22 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id, if (!initial) { if (max_stale > 64) - bch2_btree_node_rewrite(c, &iter, + bch2_btree_node_rewrite(c, iter, b->data->keys.seq, BTREE_INSERT_USE_RESERVE| BTREE_INSERT_NOWAIT| BTREE_INSERT_GC_LOCK_HELD); else if (!btree_gc_rewrite_disabled(c) && (btree_gc_always_rewrite(c) || max_stale > 16)) - bch2_btree_node_rewrite(c, &iter, + bch2_btree_node_rewrite(c, iter, b->data->keys.seq, BTREE_INSERT_NOWAIT| BTREE_INSERT_GC_LOCK_HELD); } - bch2_btree_iter_cond_resched(&iter); + bch2_trans_cond_resched(&trans); } - ret = bch2_btree_iter_unlock(&iter) ?: ret; + ret = bch2_trans_exit(&trans) ?: ret; if (ret) return ret; @@ -1030,7 +1033,8 @@ next: static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id) { - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct btree *b; bool kthread = (current->flags & PF_KTHREAD) != 0; unsigned i; @@ -1039,6 +1043,8 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id) struct btree *merge[GC_MERGE_NODES]; u32 lock_seq[GC_MERGE_NODES]; + bch2_trans_init(&trans, c); + /* * XXX: We don't have a good way of positively matching on sibling nodes * that have the same parent - this code works by handling the cases @@ -1048,7 +1054,7 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id) */ memset(merge, 0, sizeof(merge)); - __for_each_btree_node(&iter, c, btree_id, POS_MIN, + __for_each_btree_node(&trans, iter, btree_id, POS_MIN, BTREE_MAX_DEPTH, 0, BTREE_ITER_PREFETCH, b) { memmove(merge + 1, merge, @@ -1070,7 +1076,7 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id) } memset(merge + i, 0, (GC_MERGE_NODES - i) * sizeof(merge[0])); - bch2_coalesce_nodes(c, &iter, merge); + bch2_coalesce_nodes(c, iter, merge); for (i = 1; i < GC_MERGE_NODES && merge[i]; i++) { lock_seq[i] = merge[i]->lock.state.seq; @@ -1080,23 +1086,23 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id) lock_seq[0] = merge[0]->lock.state.seq; if (kthread && kthread_should_stop()) { - bch2_btree_iter_unlock(&iter); + bch2_trans_exit(&trans); return -ESHUTDOWN; } - bch2_btree_iter_cond_resched(&iter); + bch2_trans_cond_resched(&trans); /* * If the parent node wasn't relocked, it might have been split * and the nodes in our sliding window might not have the same * parent anymore - blow away the sliding window: */ - if (btree_iter_node(&iter, iter.level + 1) && - !btree_node_intent_locked(&iter, iter.level + 1)) + if (btree_iter_node(iter, iter->level + 1) && + !btree_node_intent_locked(iter, iter->level + 1)) memset(merge + 1, 0, (GC_MERGE_NODES - 1) * sizeof(merge[0])); } - return bch2_btree_iter_unlock(&iter); + return bch2_trans_exit(&trans); } /** diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c index d785e6ac22f7..10b3d53b6ebb 100644 --- a/fs/bcachefs/btree_io.c +++ b/fs/bcachefs/btree_io.c @@ -1153,19 +1153,21 @@ static void bch2_btree_node_write_error(struct bch_fs *c, struct bkey_i_btree_ptr *new_key; struct bkey_s_btree_ptr bp; struct 
bch_extent_ptr *ptr; - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; int ret; - __bch2_btree_iter_init(&iter, c, b->btree_id, b->key.k.p, - BTREE_MAX_DEPTH, - b->level, BTREE_ITER_NODES); + bch2_trans_init(&trans, c); + + iter = bch2_trans_get_node_iter(&trans, b->btree_id, b->key.k.p, + BTREE_MAX_DEPTH, b->level, 0); retry: - ret = bch2_btree_iter_traverse(&iter); + ret = bch2_btree_iter_traverse(iter); if (ret) goto err; /* has node been freed? */ - if (iter.l[b->level].b != b) { + if (iter->l[b->level].b != b) { /* node has been freed: */ BUG_ON(!btree_node_dying(b)); goto out; @@ -1184,13 +1186,13 @@ retry: if (!bch2_bkey_nr_ptrs(bp.s_c)) goto err; - ret = bch2_btree_node_update_key(c, &iter, b, new_key); + ret = bch2_btree_node_update_key(c, iter, b, new_key); if (ret == -EINTR) goto retry; if (ret) goto err; out: - bch2_btree_iter_unlock(&iter); + bch2_trans_exit(&trans); bio_put(&wbio->wbio.bio); btree_node_write_done(c, b); return; diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index 3d613e8cd55b..b2446b14bf33 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -1582,15 +1582,15 @@ struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter) return __bch2_btree_iter_peek_slot(iter); } -void __bch2_btree_iter_init(struct btree_iter *iter, struct bch_fs *c, - enum btree_id btree_id, struct bpos pos, - unsigned locks_want, unsigned depth, - unsigned flags) +static inline void bch2_btree_iter_init(struct btree_iter *iter, + struct bch_fs *c, enum btree_id btree_id, + struct bpos pos, unsigned flags) { unsigned i; - EBUG_ON(depth >= BTREE_MAX_DEPTH); - EBUG_ON(locks_want > BTREE_MAX_DEPTH); + if (btree_id == BTREE_ID_EXTENTS && + !(flags & BTREE_ITER_NODES)) + flags |= BTREE_ITER_IS_EXTENTS; iter->c = c; iter->pos = pos; @@ -1599,8 +1599,8 @@ void __bch2_btree_iter_init(struct btree_iter *iter, struct bch_fs *c, iter->flags = flags; iter->uptodate = BTREE_ITER_NEED_TRAVERSE; iter->btree_id = btree_id; - iter->level = depth; - iter->locks_want = locks_want; + iter->level = 0; + iter->locks_want = flags & BTREE_ITER_INTENT ? 1 : 0; iter->nodes_locked = 0; iter->nodes_intent_locked = 0; for (i = 0; i < ARRAY_SIZE(iter->l); i++) @@ -1677,12 +1677,14 @@ static inline unsigned btree_trans_iter_idx(struct btree_trans *trans, return idx; } -void bch2_trans_iter_put(struct btree_trans *trans, - struct btree_iter *iter) +int bch2_trans_iter_put(struct btree_trans *trans, + struct btree_iter *iter) { ssize_t idx = btree_trans_iter_idx(trans, iter); + int ret = (iter->flags & BTREE_ITER_ERROR) ? -EIO : 0; trans->iters_live &= ~(1ULL << idx); + return ret; } static inline void __bch2_trans_iter_free(struct btree_trans *trans, @@ -1696,17 +1698,23 @@ static inline void __bch2_trans_iter_free(struct btree_trans *trans, bch2_btree_iter_unlink(&trans->iters[idx]); } -void bch2_trans_iter_free(struct btree_trans *trans, - struct btree_iter *iter) +int bch2_trans_iter_free(struct btree_trans *trans, + struct btree_iter *iter) { + int ret = (iter->flags & BTREE_ITER_ERROR) ? -EIO : 0; + __bch2_trans_iter_free(trans, btree_trans_iter_idx(trans, iter)); + return ret; } -void bch2_trans_iter_free_on_commit(struct btree_trans *trans, - struct btree_iter *iter) +int bch2_trans_iter_free_on_commit(struct btree_trans *trans, + struct btree_iter *iter) { + int ret = (iter->flags & BTREE_ITER_ERROR) ? 
-EIO : 0; + trans->iters_unlink_on_commit |= 1ULL << btree_trans_iter_idx(trans, iter); + return ret; } static int btree_trans_realloc_iters(struct btree_trans *trans, @@ -1820,7 +1828,7 @@ got_slot: iter = &trans->iters[idx]; iter->id = iter_id; - bch2_btree_iter_init(iter, trans->c, btree_id, POS_MIN, flags); + bch2_btree_iter_init(iter, trans->c, btree_id, pos, flags); } else { iter = &trans->iters[idx]; @@ -1861,6 +1869,31 @@ struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans, return iter; } +struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans, + enum btree_id btree_id, + struct bpos pos, + unsigned locks_want, + unsigned depth, + unsigned flags) +{ + struct btree_iter *iter = + __btree_trans_get_iter(trans, btree_id, pos, + flags|BTREE_ITER_NODES, 0); + unsigned i; + + BUG_ON(IS_ERR(iter)); + BUG_ON(bkey_cmp(iter->pos, pos)); + + iter->locks_want = locks_want; + iter->level = depth; + + for (i = 0; i < ARRAY_SIZE(iter->l); i++) + iter->l[i].b = NULL; + iter->l[iter->level].b = BTREE_ITER_NOT_END; + + return iter; +} + struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans, struct btree_iter *src, u64 iter_id) diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h index 04f747180bd8..267cecd05d84 100644 --- a/fs/bcachefs/btree_iter.h +++ b/fs/bcachefs/btree_iter.h @@ -150,20 +150,6 @@ struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *); void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *, struct bpos); void bch2_btree_iter_set_pos(struct btree_iter *, struct bpos); -void __bch2_btree_iter_init(struct btree_iter *, struct bch_fs *, - enum btree_id, struct bpos, - unsigned , unsigned, unsigned); - -static inline void bch2_btree_iter_init(struct btree_iter *iter, - struct bch_fs *c, enum btree_id btree_id, - struct bpos pos, unsigned flags) -{ - __bch2_btree_iter_init(iter, c, btree_id, pos, - flags & BTREE_ITER_INTENT ? 1 : 0, 0, - (btree_id == BTREE_ID_EXTENTS - ? 
BTREE_ITER_IS_EXTENTS : 0)|flags); -} - void bch2_btree_iter_copy(struct btree_iter *, struct btree_iter *); static inline struct bpos btree_type_successor(enum btree_id id, @@ -221,17 +207,18 @@ static inline void bch2_btree_iter_cond_resched(struct btree_iter *iter) } } -#define __for_each_btree_node(_iter, _c, _btree_id, _start, \ +#define __for_each_btree_node(_trans, _iter, _btree_id, _start, \ _locks_want, _depth, _flags, _b) \ - for (__bch2_btree_iter_init((_iter), (_c), (_btree_id), _start, \ - _locks_want, _depth, \ - _flags|BTREE_ITER_NODES), \ + for (iter = bch2_trans_get_node_iter((_trans), (_btree_id), \ + _start, _locks_want, _depth, _flags), \ _b = bch2_btree_iter_peek_node(_iter); \ (_b); \ (_b) = bch2_btree_iter_next_node(_iter, _depth)) -#define for_each_btree_node(_iter, _c, _btree_id, _start, _flags, _b) \ - __for_each_btree_node(_iter, _c, _btree_id, _start, 0, 0, _flags, _b) +#define for_each_btree_node(_trans, _iter, _btree_id, _start, \ + _flags, _b) \ + __for_each_btree_node(_trans, _iter, _btree_id, _start, \ + 0, 0, _flags, _b) static inline struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, unsigned flags) @@ -251,9 +238,9 @@ static inline struct bkey_s_c __bch2_btree_iter_next(struct btree_iter *iter, : bch2_btree_iter_next(iter); } -#define for_each_btree_key(_iter, _c, _btree_id, _start, _flags, _k) \ - for (bch2_btree_iter_init((_iter), (_c), (_btree_id), \ - (_start), (_flags)), \ +#define for_each_btree_key(_trans, _iter, _btree_id, _start, _flags, _k)\ + for (iter = bch2_trans_get_iter((_trans), (_btree_id), \ + (_start), (_flags)), \ (_k) = __bch2_btree_iter_peek(_iter, _flags); \ !IS_ERR_OR_NULL((_k).k); \ (_k) = __bch2_btree_iter_next(_iter, _flags)) @@ -271,9 +258,9 @@ static inline int btree_iter_err(struct bkey_s_c k) /* new multiple iterator interface: */ void bch2_trans_preload_iters(struct btree_trans *); -void bch2_trans_iter_put(struct btree_trans *, struct btree_iter *); -void bch2_trans_iter_free(struct btree_trans *, struct btree_iter *); -void bch2_trans_iter_free_on_commit(struct btree_trans *, struct btree_iter *); +int bch2_trans_iter_put(struct btree_trans *, struct btree_iter *); +int bch2_trans_iter_free(struct btree_trans *, struct btree_iter *); +int bch2_trans_iter_free_on_commit(struct btree_trans *, struct btree_iter *); void bch2_trans_unlink_iters(struct btree_trans *, u64); @@ -308,6 +295,10 @@ bch2_trans_copy_iter(struct btree_trans *trans, struct btree_iter *src) return __bch2_trans_copy_iter(trans, src, __btree_iter_id()); } +struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *, + enum btree_id, struct bpos, + unsigned, unsigned, unsigned); + void __bch2_trans_begin(struct btree_trans *); static inline void bch2_trans_begin_updates(struct btree_trans *trans) @@ -320,6 +311,16 @@ int bch2_trans_unlock(struct btree_trans *); void bch2_trans_init(struct btree_trans *, struct bch_fs *); int bch2_trans_exit(struct btree_trans *); +static inline void bch2_trans_cond_resched(struct btree_trans *trans) +{ + if (need_resched()) { + bch2_trans_unlock(trans); + schedule(); + } else if (race_fault()) { + bch2_trans_unlock(trans); + } +} + #ifdef TRACE_TRANSACTION_RESTARTS #define bch2_trans_begin(_trans) \ do { \ diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c index 1c9bfec922c5..45838db7b991 100644 --- a/fs/bcachefs/btree_update_leaf.c +++ b/fs/bcachefs/btree_update_leaf.c @@ -999,7 +999,7 @@ int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id, if (ret) break; - 
bch2_btree_iter_cond_resched(iter); + bch2_trans_cond_resched(&trans); } bch2_trans_exit(&trans); diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c index 5ee38a6a442f..f7cfec9f00f9 100644 --- a/fs/bcachefs/chardev.c +++ b/fs/bcachefs/chardev.c @@ -303,8 +303,8 @@ static ssize_t bch2_data_job_read(struct file *file, char __user *buf, struct bch_ioctl_data_event e = { .type = BCH_DATA_EVENT_PROGRESS, .p.data_type = ctx->stats.data_type, - .p.btree_id = ctx->stats.iter.btree_id, - .p.pos = ctx->stats.iter.pos, + .p.btree_id = ctx->stats.btree_id, + .p.pos = ctx->stats.pos, .p.sectors_done = atomic64_read(&ctx->stats.sectors_seen), .p.sectors_total = bch2_fs_usage_read_short(c).used, }; diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c index f15c29878a9e..64e079280a9a 100644 --- a/fs/bcachefs/debug.c +++ b/fs/bcachefs/debug.c @@ -205,7 +205,8 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf, size_t size, loff_t *ppos) { struct dump_iter *i = file->private_data; - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; int err; @@ -220,8 +221,10 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf, if (!i->size) return i->ret; - bch2_btree_iter_init(&iter, i->c, i->id, i->from, BTREE_ITER_PREFETCH); - k = bch2_btree_iter_peek(&iter); + bch2_trans_init(&trans, i->c); + + iter = bch2_trans_get_iter(&trans, i->id, i->from, BTREE_ITER_PREFETCH); + k = bch2_btree_iter_peek(iter); while (k.k && !(err = btree_iter_err(k))) { bch2_bkey_val_to_text(&PBUF(i->buf), i->c, k); @@ -230,8 +233,8 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf, i->buf[i->bytes] = '\n'; i->bytes++; - k = bch2_btree_iter_next(&iter); - i->from = iter.pos; + k = bch2_btree_iter_next(iter); + i->from = iter->pos; err = flush_buf(i); if (err) @@ -240,7 +243,7 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf, if (!i->size) break; } - bch2_btree_iter_unlock(&iter); + bch2_trans_exit(&trans); return err < 0 ? err : i->ret; } @@ -256,7 +259,8 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf, size_t size, loff_t *ppos) { struct dump_iter *i = file->private_data; - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct btree *b; int err; @@ -271,7 +275,9 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf, if (!i->size || !bkey_cmp(POS_MAX, i->from)) return i->ret; - for_each_btree_node(&iter, i->c, i->id, i->from, 0, b) { + bch2_trans_init(&trans, i->c); + + for_each_btree_node(&trans, iter, i->id, i->from, 0, b) { bch2_btree_node_to_text(&PBUF(i->buf), i->c, b); i->bytes = strlen(i->buf); err = flush_buf(i); @@ -289,7 +295,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf, if (!i->size) break; } - bch2_btree_iter_unlock(&iter); + bch2_trans_exit(&trans); return err < 0 ? 
err : i->ret; } @@ -305,7 +311,8 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf, size_t size, loff_t *ppos) { struct dump_iter *i = file->private_data; - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; struct btree *prev_node = NULL; int err; @@ -321,11 +328,13 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf, if (!i->size) return i->ret; - bch2_btree_iter_init(&iter, i->c, i->id, i->from, BTREE_ITER_PREFETCH); + bch2_trans_init(&trans, i->c); + + iter = bch2_trans_get_iter(&trans, i->id, i->from, BTREE_ITER_PREFETCH); - while ((k = bch2_btree_iter_peek(&iter)).k && + while ((k = bch2_btree_iter_peek(iter)).k && !(err = btree_iter_err(k))) { - struct btree_iter_level *l = &iter.l[0]; + struct btree_iter_level *l = &iter->l[0]; struct bkey_packed *_k = bch2_btree_node_iter_peek(&l->iter, l->b); @@ -344,8 +353,8 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf, if (err) break; - bch2_btree_iter_next(&iter); - i->from = iter.pos; + bch2_btree_iter_next(iter); + i->from = iter->pos; err = flush_buf(i); if (err) @@ -354,7 +363,7 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf, if (!i->size) break; } - bch2_btree_iter_unlock(&iter); + bch2_trans_exit(&trans); return err < 0 ? err : i->ret; } diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c index dc3883204d80..672a94936179 100644 --- a/fs/bcachefs/dirent.c +++ b/fs/bcachefs/dirent.c @@ -331,11 +331,15 @@ out: int bch2_empty_dir(struct bch_fs *c, u64 dir_inum) { - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; int ret = 0; - for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(dir_inum, 0), 0, k) { + bch2_trans_init(&trans, c); + + for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, + POS(dir_inum, 0), 0, k) { if (k.k->p.inode > dir_inum) break; @@ -344,7 +348,7 @@ int bch2_empty_dir(struct bch_fs *c, u64 dir_inum) break; } } - bch2_btree_iter_unlock(&iter); + bch2_trans_exit(&trans); return ret; } @@ -353,7 +357,8 @@ int bch2_readdir(struct bch_fs *c, struct file *file, struct dir_context *ctx) { struct bch_inode_info *inode = file_bch_inode(file); - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; struct bkey_s_c_dirent dirent; unsigned len; @@ -361,7 +366,9 @@ int bch2_readdir(struct bch_fs *c, struct file *file, if (!dir_emit_dots(file, ctx)) return 0; - for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, + bch2_trans_init(&trans, c); + + for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS(inode->v.i_ino, ctx->pos), 0, k) { if (k.k->type != KEY_TYPE_dirent) continue; @@ -387,7 +394,7 @@ int bch2_readdir(struct bch_fs *c, struct file *file, ctx->pos = k.k->p.offset + 1; } - bch2_btree_iter_unlock(&iter); + bch2_trans_exit(&trans); return 0; } diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c index a989ba172faa..c33bcffa7871 100644 --- a/fs/bcachefs/ec.c +++ b/fs/bcachefs/ec.c @@ -398,7 +398,8 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf, /* recovery read path: */ int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio) { - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct ec_stripe_buf *buf; struct closure cl; struct bkey_s_c k; @@ -419,19 +420,21 @@ int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio) if (!buf) return -ENOMEM; - bch2_btree_iter_init(&iter, c, BTREE_ID_EC, - POS(0, 
stripe_idx), - BTREE_ITER_SLOTS); - k = bch2_btree_iter_peek_slot(&iter); + bch2_trans_init(&trans, c); + + iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, + POS(0, stripe_idx), + BTREE_ITER_SLOTS); + k = bch2_btree_iter_peek_slot(iter); if (btree_iter_err(k) || k.k->type != KEY_TYPE_stripe) { __bcache_io_error(c, "error doing reconstruct read: stripe not found"); kfree(buf); - return bch2_btree_iter_unlock(&iter) ?: -EIO; + return bch2_trans_exit(&trans) ?: -EIO; } bkey_reassemble(&buf->key.k_i, k); - bch2_btree_iter_unlock(&iter); + bch2_trans_exit(&trans); v = &buf->key.v; @@ -1238,7 +1241,8 @@ static void bch2_stripe_read_key(struct bch_fs *c, struct bkey_s_c k) int bch2_stripes_read(struct bch_fs *c, struct list_head *journal_replay_list) { struct journal_replay *r; - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; int ret; @@ -1246,12 +1250,14 @@ int bch2_stripes_read(struct bch_fs *c, struct list_head *journal_replay_list) if (ret) return ret; - for_each_btree_key(&iter, c, BTREE_ID_EC, POS_MIN, 0, k) { + bch2_trans_init(&trans, c); + + for_each_btree_key(&trans, iter, BTREE_ID_EC, POS_MIN, 0, k) { bch2_stripe_read_key(c, k); - bch2_btree_iter_cond_resched(&iter); + bch2_trans_cond_resched(&trans); } - ret = bch2_btree_iter_unlock(&iter); + ret = bch2_trans_exit(&trans); if (ret) return ret; @@ -1269,17 +1275,20 @@ int bch2_stripes_read(struct bch_fs *c, struct list_head *journal_replay_list) int bch2_ec_mem_alloc(struct bch_fs *c, bool gc) { - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; size_t i, idx = 0; int ret = 0; - bch2_btree_iter_init(&iter, c, BTREE_ID_EC, POS(0, U64_MAX), 0); + bch2_trans_init(&trans, c); + + iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS(0, U64_MAX), 0); - k = bch2_btree_iter_prev(&iter); + k = bch2_btree_iter_prev(iter); if (!IS_ERR_OR_NULL(k.k)) idx = k.k->p.offset + 1; - ret = bch2_btree_iter_unlock(&iter); + ret = bch2_trans_exit(&trans); if (ret) return ret; diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c index 194b8d6da1bb..ce46417b07a0 100644 --- a/fs/bcachefs/extents.c +++ b/fs/bcachefs/extents.c @@ -1623,15 +1623,18 @@ static bool bch2_extent_merge_inline(struct bch_fs *c, bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size, unsigned nr_replicas) { - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bpos end = pos; struct bkey_s_c k; bool ret = true; end.offset += size; - for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, pos, - BTREE_ITER_SLOTS, k) { + bch2_trans_init(&trans, c); + + for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, pos, + BTREE_ITER_SLOTS, k) { if (bkey_cmp(bkey_start_pos(k.k), end) >= 0) break; @@ -1640,7 +1643,7 @@ bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size, break; } } - bch2_btree_iter_unlock(&iter); + bch2_trans_exit(&trans); return ret; } diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c index 251c811abeda..efc189c02db7 100644 --- a/fs/bcachefs/fs-io.c +++ b/fs/bcachefs/fs-io.c @@ -997,7 +997,8 @@ void bch2_readahead(struct readahead_control *ractl) struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host); struct bch_fs *c = inode->v.i_sb->s_fs_info; struct bch_io_opts opts = io_opts(c, inode); - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct page *page; struct readpages_iter readpages_iter; int ret; @@ -1005,8 +1006,10 @@ void bch2_readahead(struct readahead_control 
*ractl) ret = readpages_iter_init(&readpages_iter, ractl); BUG_ON(ret); - bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, - BTREE_ITER_SLOTS); + bch2_trans_init(&trans, c); + + iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN, + BTREE_ITER_SLOTS); bch2_pagecache_add_get(&inode->ei_pagecache_lock); @@ -1027,26 +1030,33 @@ void bch2_readahead(struct readahead_control *ractl) rbio->bio.bi_end_io = bch2_readpages_end_io; __bio_add_page(&rbio->bio, page, PAGE_SIZE, 0); - bchfs_read(c, &iter, rbio, inode->v.i_ino, &readpages_iter); + bchfs_read(c, iter, rbio, inode->v.i_ino, &readpages_iter); } bch2_pagecache_add_put(&inode->ei_pagecache_lock); + + bch2_trans_exit(&trans); kfree(readpages_iter.pages); } static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio, u64 inum, struct page *page) { - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; page_state_init_for_read(page); rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC; bio_add_page_contig(&rbio->bio, page); - bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, - BTREE_ITER_SLOTS); - bchfs_read(c, &iter, rbio, inum, NULL); + bch2_trans_init(&trans, c); + iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN, + BTREE_ITER_SLOTS); + + bchfs_read(c, iter, rbio, inum, NULL); + + bch2_trans_exit(&trans); } static void bch2_read_single_page_end_io(struct bio *bio) @@ -2111,7 +2121,7 @@ static int __bch2_fpunch(struct bch_fs *c, struct bch_inode_info *inode, if (ret) break; - bch2_btree_iter_cond_resched(iter); + bch2_trans_cond_resched(&trans); } bch2_trans_exit(&trans); @@ -2123,13 +2133,14 @@ static inline int range_has_data(struct bch_fs *c, struct bpos start, struct bpos end) { - - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; int ret = 0; - for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, - start, 0, k) { + bch2_trans_init(&trans, c); + + for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k) { if (bkey_cmp(bkey_start_pos(k.k), end) >= 0) break; @@ -2139,7 +2150,7 @@ static inline int range_has_data(struct bch_fs *c, } } - return bch2_btree_iter_unlock(&iter) ?: ret; + return bch2_trans_exit(&trans) ?: ret; } static int __bch2_truncate_page(struct bch_inode_info *inode, @@ -2464,7 +2475,7 @@ btree_iter_err: * pointers... which isn't a _super_ serious problem... 
*/ - bch2_btree_iter_cond_resched(src); + bch2_trans_cond_resched(&trans); } bch2_trans_unlock(&trans); @@ -2709,7 +2720,8 @@ static loff_t bch2_seek_data(struct file *file, u64 offset) { struct bch_inode_info *inode = file_bch_inode(file); struct bch_fs *c = inode->v.i_sb->s_fs_info; - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; u64 isize, next_data = MAX_LFS_FILESIZE; int ret; @@ -2718,7 +2730,9 @@ static loff_t bch2_seek_data(struct file *file, u64 offset) if (offset >= isize) return -ENXIO; - for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, + bch2_trans_init(&trans, c); + + for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS(inode->v.i_ino, offset >> 9), 0, k) { if (k.k->p.inode != inode->v.i_ino) { break; @@ -2729,7 +2743,7 @@ static loff_t bch2_seek_data(struct file *file, u64 offset) break; } - ret = bch2_btree_iter_unlock(&iter); + ret = bch2_trans_exit(&trans); if (ret) return ret; @@ -2779,7 +2793,8 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset) { struct bch_inode_info *inode = file_bch_inode(file); struct bch_fs *c = inode->v.i_sb->s_fs_info; - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; u64 isize, next_hole = MAX_LFS_FILESIZE; int ret; @@ -2788,7 +2803,9 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset) if (offset >= isize) return -ENXIO; - for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, + bch2_trans_init(&trans, c); + + for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS(inode->v.i_ino, offset >> 9), BTREE_ITER_SLOTS, k) { if (k.k->p.inode != inode->v.i_ino) { @@ -2807,7 +2824,7 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset) } } - ret = bch2_btree_iter_unlock(&iter); + ret = bch2_trans_exit(&trans); if (ret) return ret; diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c index 2a5a90b2a781..6e377a0e176f 100644 --- a/fs/bcachefs/fs.c +++ b/fs/bcachefs/fs.c @@ -157,7 +157,7 @@ int __must_check bch2_write_inode_trans(struct btree_trans *trans, void *p) { struct bch_fs *c = trans->c; - struct btree_iter *iter; + struct btree_iter *iter = NULL; struct bkey_inode_buf *inode_p; int ret; @@ -1193,7 +1193,8 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info, { struct bch_fs *c = vinode->i_sb->s_fs_info; struct bch_inode_info *ei = to_bch_ei(vinode); - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; BKEY_PADDED(k) tmp; bool have_extent = false; @@ -1206,7 +1207,9 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info, if (start + len < start) return -EINVAL; - for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, + bch2_trans_init(&trans, c); + + for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS(ei->v.i_ino, start >> 9), 0, k) if (bkey_extent_is_data(k.k) || k.k->type == KEY_TYPE_reservation) { @@ -1227,7 +1230,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info, if (have_extent) ret = bch2_fill_extent(info, &tmp.k, FIEMAP_EXTENT_LAST); out: - bch2_btree_iter_unlock(&iter); + bch2_trans_exit(&trans); return ret < 0 ? 
ret : 0; } diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c index 439f758d8178..41284d38db2f 100644 --- a/fs/bcachefs/fsck.c +++ b/fs/bcachefs/fsck.c @@ -16,6 +16,23 @@ #define QSTR(n) { { { .len = strlen(n) } }, .name = n } +static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum) +{ + struct btree_iter *iter; + struct bkey_s_c k; + u64 sectors = 0; + + for_each_btree_key(trans, iter, BTREE_ID_EXTENTS, POS(inum, 0), 0, k) { + if (k.k->p.inode != inum) + break; + + if (bkey_extent_is_allocation(k.k)) + sectors += k.k->size; + } + + return bch2_trans_iter_free(trans, iter) ?: sectors; +} + static int remove_dirent(struct bch_fs *c, struct btree_iter *iter, struct bkey_s_c_dirent dirent) { @@ -181,44 +198,32 @@ err: return ret; } -/* fsck hasn't been converted to new transactions yet: */ -static int fsck_hash_delete_at(const struct bch_hash_desc desc, +static int fsck_hash_delete_at(struct btree_trans *trans, + const struct bch_hash_desc desc, struct bch_hash_info *info, - struct btree_iter *orig_iter) + struct btree_iter *iter) { - struct btree_trans trans; - struct btree_iter *iter; int ret; - - bch2_btree_iter_unlock(orig_iter); - - bch2_trans_init(&trans, orig_iter->c); retry: - bch2_trans_begin(&trans); - - iter = bch2_trans_copy_iter(&trans, orig_iter); - if (IS_ERR(iter)) { - ret = PTR_ERR(iter); - goto err; - } - - ret = bch2_hash_delete_at(&trans, desc, info, iter) ?: - bch2_trans_commit(&trans, NULL, NULL, + ret = bch2_hash_delete_at(trans, desc, info, iter) ?: + bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_ATOMIC| BTREE_INSERT_NOFAIL| BTREE_INSERT_LAZY_RW); -err: - if (ret == -EINTR) - goto retry; + if (ret == -EINTR) { + ret = bch2_btree_iter_traverse(iter); + if (!ret) + goto retry; + } - bch2_trans_exit(&trans); return ret; } -static int hash_check_duplicates(const struct bch_hash_desc desc, - struct hash_check *h, struct bch_fs *c, - struct btree_iter *k_iter, struct bkey_s_c k) +static int hash_check_duplicates(struct btree_trans *trans, + const struct bch_hash_desc desc, struct hash_check *h, + struct btree_iter *k_iter, struct bkey_s_c k) { + struct bch_fs *c = trans->c; struct btree_iter *iter; struct bkey_s_c k2; char buf[200]; @@ -239,7 +244,7 @@ static int hash_check_duplicates(const struct bch_hash_desc desc, "duplicate hash table keys:\n%s", (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf))) { - ret = fsck_hash_delete_at(desc, &h->info, k_iter); + ret = fsck_hash_delete_at(trans, desc, &h->info, k_iter); if (ret) return ret; ret = 1; @@ -274,9 +279,9 @@ static bool key_has_correct_hash(const struct bch_hash_desc desc, hash <= k.k->p.offset; } -static int hash_check_key(const struct bch_hash_desc desc, - struct btree_trans *trans, struct hash_check *h, - struct btree_iter *k_iter, struct bkey_s_c k) +static int hash_check_key(struct btree_trans *trans, + const struct bch_hash_desc desc, struct hash_check *h, + struct btree_iter *k_iter, struct bkey_s_c k) { struct bch_fs *c = trans->c; char buf[200]; @@ -312,7 +317,7 @@ static int hash_check_key(const struct bch_hash_desc desc, return 1; } - ret = hash_check_duplicates(desc, h, c, k_iter, k); + ret = hash_check_duplicates(trans, desc, h, k_iter, k); fsck_err: return ret; } @@ -417,14 +422,17 @@ noinline_for_stack static int check_extents(struct bch_fs *c) { struct inode_walker w = inode_walker_init(); - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; u64 i_sectors; int ret = 0; + bch2_trans_init(&trans, c); + bch_verbose(c, "checking extents"); - 
for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, + for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS(BCACHEFS_ROOT_INO, 0), 0, k) { ret = walk_inode(c, &w, k.k->p.inode); if (ret) @@ -437,7 +445,7 @@ static int check_extents(struct bch_fs *c) !S_ISREG(w.inode.bi_mode) && !S_ISLNK(w.inode.bi_mode), c, "extent type %u for non regular file, inode %llu mode %o", k.k->type, k.k->p.inode, w.inode.bi_mode)) { - bch2_btree_iter_unlock(&iter); + bch2_trans_unlock(&trans); ret = bch2_inode_truncate(c, k.k->p.inode, 0); if (ret) @@ -449,14 +457,14 @@ static int check_extents(struct bch_fs *c) w.have_inode && !(w.inode.bi_flags & BCH_INODE_I_SECTORS_DIRTY) && w.inode.bi_sectors != - (i_sectors = bch2_count_inode_sectors(c, w.cur_inum)), + (i_sectors = bch2_count_inode_sectors(&trans, w.cur_inum)), c, "i_sectors wrong: got %llu, should be %llu", w.inode.bi_sectors, i_sectors)) { struct bkey_inode_buf p; w.inode.bi_sectors = i_sectors; - bch2_btree_iter_unlock(&iter); + bch2_trans_unlock(&trans); bch2_inode_pack(&p, &w.inode); @@ -470,7 +478,7 @@ static int check_extents(struct bch_fs *c) } /* revalidate iterator: */ - k = bch2_btree_iter_peek(&iter); + k = bch2_btree_iter_peek(iter); } if (fsck_err_on(w.have_inode && @@ -479,7 +487,7 @@ static int check_extents(struct bch_fs *c) k.k->p.offset > round_up(w.inode.bi_size, PAGE_SIZE) >> 9, c, "extent type %u offset %llu past end of inode %llu, i_size %llu", k.k->type, k.k->p.offset, k.k->p.inode, w.inode.bi_size)) { - bch2_btree_iter_unlock(&iter); + bch2_trans_unlock(&trans); ret = bch2_inode_truncate(c, k.k->p.inode, w.inode.bi_size); @@ -490,7 +498,7 @@ static int check_extents(struct bch_fs *c) } err: fsck_err: - return bch2_btree_iter_unlock(&iter) ?: ret; + return bch2_trans_exit(&trans) ?: ret; } /* @@ -688,7 +696,8 @@ static int check_xattrs(struct bch_fs *c) if (w.first_this_inode && w.have_inode) hash_check_set_inode(&h, c, &w.inode); - ret = hash_check_key(bch2_xattr_hash_desc, &trans, &h, iter, k); + ret = hash_check_key(&trans, bch2_xattr_hash_desc, + &h, iter, k); if (ret) goto fsck_err; } @@ -863,13 +872,16 @@ static int check_directory_structure(struct bch_fs *c, struct inode_bitmap dirs_done = { NULL, 0 }; struct pathbuf path = { 0, 0, NULL }; struct pathbuf_entry *e; - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; struct bkey_s_c_dirent dirent; bool had_unreachable; u64 d_inum; int ret = 0; + bch2_trans_init(&trans, c); + bch_verbose(c, "checking directory structure"); /* DFS: */ @@ -894,7 +906,7 @@ next: if (e->offset == U64_MAX) goto up; - for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, + for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS(e->inum, e->offset + 1), 0, k) { if (k.k->p.inode != e->inum) break; @@ -914,7 +926,7 @@ next: if (fsck_err_on(inode_bitmap_test(&dirs_done, d_inum), c, "directory %llu has multiple hardlinks", d_inum)) { - ret = remove_dirent(c, &iter, dirent); + ret = remove_dirent(c, iter, dirent); if (ret) goto err; continue; @@ -931,10 +943,14 @@ next: goto err; } - bch2_btree_iter_unlock(&iter); + ret = bch2_trans_iter_free(&trans, iter); + if (ret) { + bch_err(c, "btree error %i in fsck", ret); + goto err; + } goto next; } - ret = bch2_btree_iter_unlock(&iter); + ret = bch2_trans_iter_free(&trans, iter); if (ret) { bch_err(c, "btree error %i in fsck", ret); goto err; @@ -943,7 +959,7 @@ up: path.nr--; } - for_each_btree_key(&iter, c, BTREE_ID_INODES, POS_MIN, 0, k) { + for_each_btree_key(&trans, iter, BTREE_ID_INODES, POS_MIN, 0, k) { if (k.k->type != 
KEY_TYPE_inode) continue; @@ -956,7 +972,7 @@ up: if (fsck_err_on(!inode_bitmap_test(&dirs_done, k.k->p.inode), c, "unreachable directory found (inum %llu)", k.k->p.inode)) { - bch2_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(iter); ret = reattach_inode(c, lostfound_inode, k.k->p.inode); if (ret) { @@ -966,7 +982,7 @@ up: had_unreachable = true; } } - ret = bch2_btree_iter_unlock(&iter); + ret = bch2_trans_iter_free(&trans, iter); if (ret) goto err; @@ -985,7 +1001,7 @@ out: return ret; err: fsck_err: - ret = bch2_btree_iter_unlock(&iter) ?: ret; + ret = bch2_trans_exit(&trans) ?: ret; goto out; } @@ -1022,15 +1038,18 @@ noinline_for_stack static int bch2_gc_walk_dirents(struct bch_fs *c, nlink_table *links, u64 range_start, u64 *range_end) { - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; struct bkey_s_c_dirent d; u64 d_inum; int ret; + bch2_trans_init(&trans, c); + inc_link(c, links, range_start, range_end, BCACHEFS_ROOT_INO, false); - for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN, 0, k) { + for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN, 0, k) { switch (k.k->type) { case KEY_TYPE_dirent: d = bkey_s_c_to_dirent(k); @@ -1046,32 +1065,15 @@ static int bch2_gc_walk_dirents(struct bch_fs *c, nlink_table *links, break; } - bch2_btree_iter_cond_resched(&iter); + bch2_trans_cond_resched(&trans); } - ret = bch2_btree_iter_unlock(&iter); + ret = bch2_trans_exit(&trans); if (ret) bch_err(c, "error in fs gc: btree error %i while walking dirents", ret); return ret; } -s64 bch2_count_inode_sectors(struct bch_fs *c, u64 inum) -{ - struct btree_iter iter; - struct bkey_s_c k; - u64 sectors = 0; - - for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(inum, 0), 0, k) { - if (k.k->p.inode != inum) - break; - - if (bkey_extent_is_allocation(k.k)) - sectors += k.k->size; - } - - return bch2_btree_iter_unlock(&iter) ?: sectors; -} - static int check_inode_nlink(struct bch_fs *c, struct bch_inode_unpacked *lostfound_inode, struct bch_inode_unpacked *u, @@ -1253,7 +1255,7 @@ static int check_inode(struct btree_trans *trans, bch_verbose(c, "recounting sectors for inode %llu", u.bi_inum); - sectors = bch2_count_inode_sectors(c, u.bi_inum); + sectors = bch2_count_inode_sectors(trans, u.bi_inum); if (sectors < 0) { bch_err(c, "error in fs gc: error %i " "recounting inode sectors", @@ -1346,7 +1348,7 @@ peek_nlinks: link = genradix_iter_peek(&nlinks_iter, links); genradix_iter_advance(&nlinks_iter, links); bch2_btree_iter_next(iter); - bch2_btree_iter_cond_resched(iter); + bch2_trans_cond_resched(&trans); } fsck_err: bch2_trans_exit(&trans); diff --git a/fs/bcachefs/fsck.h b/fs/bcachefs/fsck.h index 88da06762d7d..97460452e842 100644 --- a/fs/bcachefs/fsck.h +++ b/fs/bcachefs/fsck.h @@ -2,7 +2,6 @@ #ifndef _BCACHEFS_FSCK_H #define _BCACHEFS_FSCK_H -s64 bch2_count_inode_sectors(struct bch_fs *, u64); int bch2_fsck(struct bch_fs *); #endif /* _BCACHEFS_FSCK_H */ diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c index 811c917cba84..c6336e7a2a23 100644 --- a/fs/bcachefs/inode.c +++ b/fs/bcachefs/inode.c @@ -447,13 +447,15 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr) int bch2_inode_find_by_inum(struct bch_fs *c, u64 inode_nr, struct bch_inode_unpacked *inode) { - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; int ret = -ENOENT; - for_each_btree_key(&iter, c, BTREE_ID_INODES, - POS(inode_nr, 0), - BTREE_ITER_SLOTS, k) { + bch2_trans_init(&trans, c); + + for_each_btree_key(&trans, 
iter, BTREE_ID_INODES, + POS(inode_nr, 0), BTREE_ITER_SLOTS, k) { switch (k.k->type) { case KEY_TYPE_inode: ret = bch2_inode_unpack(bkey_s_c_to_inode(k), inode); @@ -466,7 +468,7 @@ int bch2_inode_find_by_inum(struct bch_fs *c, u64 inode_nr, break; } - return bch2_btree_iter_unlock(&iter) ?: ret; + return bch2_trans_exit(&trans) ?: ret; } #ifdef CONFIG_BCACHEFS_DEBUG diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c index f4c49bf82456..62ee09121036 100644 --- a/fs/bcachefs/io.c +++ b/fs/bcachefs/io.c @@ -1263,27 +1263,28 @@ static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio struct bch_io_failures *failed, unsigned flags) { - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; BKEY_PADDED(k) tmp; struct bkey_s_c k; int ret; flags &= ~BCH_READ_LAST_FRAGMENT; - bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, - rbio->pos, BTREE_ITER_SLOTS); + bch2_trans_init(&trans, c); + + iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, + rbio->pos, BTREE_ITER_SLOTS); retry: rbio->bio.bi_status = 0; - k = bch2_btree_iter_peek_slot(&iter); - if (btree_iter_err(k)) { - bch2_btree_iter_unlock(&iter); + k = bch2_btree_iter_peek_slot(iter); + if (btree_iter_err(k)) goto err; - } bkey_reassemble(&tmp.k, k); k = bkey_i_to_s_c(&tmp.k); - bch2_btree_iter_unlock(&iter); + bch2_trans_unlock(&trans); if (!bkey_extent_is_data(k.k) || !bch2_extent_matches_ptr(c, bkey_i_to_s_c_extent(&tmp.k), @@ -1300,25 +1301,30 @@ retry: goto retry; if (ret) goto err; - goto out; -err: - rbio->bio.bi_status = BLK_STS_IOERR; out: bch2_rbio_done(rbio); + bch2_trans_exit(&trans); + return; +err: + rbio->bio.bi_status = BLK_STS_IOERR; + goto out; } static void bch2_read_retry(struct bch_fs *c, struct bch_read_bio *rbio, struct bvec_iter bvec_iter, u64 inode, struct bch_io_failures *failed, unsigned flags) { - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; int ret; + bch2_trans_init(&trans, c); + flags &= ~BCH_READ_LAST_FRAGMENT; flags |= BCH_READ_MUST_CLONE; retry: - for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, + for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS(inode, bvec_iter.bi_sector), BTREE_ITER_SLOTS, k) { BKEY_PADDED(k) tmp; @@ -1326,7 +1332,7 @@ retry: bkey_reassemble(&tmp.k, k); k = bkey_i_to_s_c(&tmp.k); - bch2_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(iter); bytes = min_t(unsigned, bvec_iter.bi_size, (k.k->p.offset - bvec_iter.bi_sector) << 9); @@ -1351,12 +1357,12 @@ retry: * If we get here, it better have been because there was an error * reading a btree node */ - ret = bch2_btree_iter_unlock(&iter); - BUG_ON(!ret); - __bcache_io_error(c, "btree IO error %i", ret); + BUG_ON(!(iter->flags & BTREE_ITER_ERROR)); + __bcache_io_error(c, "btree IO error"); err: rbio->bio.bi_status = BLK_STS_IOERR; out: + bch2_trans_exit(&trans); bch2_rbio_done(rbio); } @@ -1859,12 +1865,14 @@ out_read_done: void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode) { - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; unsigned flags = BCH_READ_RETRY_IF_STALE| BCH_READ_MAY_PROMOTE| BCH_READ_USER_MAPPED; - int ret; + + bch2_trans_init(&trans, c); BUG_ON(rbio->_state); BUG_ON(flags & BCH_READ_NODECODE); @@ -1873,7 +1881,7 @@ void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode) rbio->c = c; rbio->start_time = local_clock(); - for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, + for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS(inode, 
rbio->bio.bi_iter.bi_sector), BTREE_ITER_SLOTS, k) { BKEY_PADDED(k) tmp; @@ -1885,7 +1893,7 @@ void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode) */ bkey_reassemble(&tmp.k, k); k = bkey_i_to_s_c(&tmp.k); - bch2_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(iter); bytes = min_t(unsigned, rbio->bio.bi_iter.bi_size, (k.k->p.offset - rbio->bio.bi_iter.bi_sector) << 9); @@ -1907,9 +1915,10 @@ void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode) * If we get here, it better have been because there was an error * reading a btree node */ - ret = bch2_btree_iter_unlock(&iter); - BUG_ON(!ret); - bcache_io_error(c, &rbio->bio, "btree IO error %i", ret); + BUG_ON(!(iter->flags & BTREE_ITER_ERROR)); + bcache_io_error(c, &rbio->bio, "btree IO error"); + + bch2_trans_exit(&trans); bch2_rbio_done(rbio); } diff --git a/fs/bcachefs/journal_seq_blacklist.c b/fs/bcachefs/journal_seq_blacklist.c index c26f36d58633..45c8d38d12de 100644 --- a/fs/bcachefs/journal_seq_blacklist.c +++ b/fs/bcachefs/journal_seq_blacklist.c @@ -62,9 +62,12 @@ static void journal_seq_blacklist_flush(struct journal *j, closure_init_stack(&cl); for (i = 0;; i++) { - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct btree *b; + bch2_trans_init(&trans, c); + mutex_lock(&j->blacklist_lock); if (i >= bl->nr_entries) { mutex_unlock(&j->blacklist_lock); @@ -73,17 +76,17 @@ static void journal_seq_blacklist_flush(struct journal *j, n = bl->entries[i]; mutex_unlock(&j->blacklist_lock); - __bch2_btree_iter_init(&iter, c, n.btree_id, n.pos, - 0, 0, BTREE_ITER_NODES); + iter = bch2_trans_get_node_iter(&trans, n.btree_id, n.pos, + 0, 0, 0); - b = bch2_btree_iter_peek_node(&iter); + b = bch2_btree_iter_peek_node(iter); /* The node might have already been rewritten: */ if (b->data->keys.seq == n.seq) { - ret = bch2_btree_node_rewrite(c, &iter, n.seq, 0); + ret = bch2_btree_node_rewrite(c, iter, n.seq, 0); if (ret) { - bch2_btree_iter_unlock(&iter); + bch2_trans_exit(&trans); bch2_fs_fatal_error(c, "error %i rewriting btree node with blacklisted journal seq", ret); @@ -92,7 +95,7 @@ static void journal_seq_blacklist_flush(struct journal *j, } } - bch2_btree_iter_unlock(&iter); + bch2_trans_exit(&trans); } for (i = 0;; i++) { diff --git a/fs/bcachefs/migrate.c b/fs/bcachefs/migrate.c index 38bf75b6bc2d..2b63b07db2bc 100644 --- a/fs/bcachefs/migrate.c +++ b/fs/bcachefs/migrate.c @@ -106,7 +106,8 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags) static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags) { - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct closure cl; struct btree *b; unsigned id; @@ -116,13 +117,15 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags) if (flags & BCH_FORCE_IF_METADATA_LOST) return -EINVAL; + bch2_trans_init(&trans, c); closure_init_stack(&cl); mutex_lock(&c->replicas_gc_lock); bch2_replicas_gc_start(c, 1 << BCH_DATA_BTREE); for (id = 0; id < BTREE_ID_NR; id++) { - for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) { + for_each_btree_node(&trans, iter, id, POS_MIN, + BTREE_ITER_PREFETCH, b) { __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp; struct bkey_i_btree_ptr *new_key; retry: @@ -134,7 +137,7 @@ retry: * but got -EINTR after upgrading the iter, but * then raced and the node is now gone: */ - bch2_btree_iter_downgrade(&iter); + bch2_btree_iter_downgrade(iter); ret = bch2_mark_bkey_replicas(c, 
bkey_i_to_s_c(&b->key)); if (ret) @@ -148,16 +151,16 @@ retry: if (ret) goto err; - ret = bch2_btree_node_update_key(c, &iter, b, new_key); + ret = bch2_btree_node_update_key(c, iter, b, new_key); if (ret == -EINTR) { - b = bch2_btree_iter_peek_node(&iter); + b = bch2_btree_iter_peek_node(iter); goto retry; } if (ret) goto err; } } - bch2_btree_iter_unlock(&iter); + bch2_trans_iter_free(&trans, iter); } /* flush relevant btree updates */ @@ -171,14 +174,13 @@ retry: } ret = 0; -out: +err: + bch2_trans_exit(&trans); + ret = bch2_replicas_gc_end(c, ret); mutex_unlock(&c->replicas_gc_lock); return ret; -err: - bch2_btree_iter_unlock(&iter); - goto out; } int bch2_dev_data_drop(struct bch_fs *c, unsigned dev_idx, int flags) diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c index 8c453ae31525..3f3e34e07f35 100644 --- a/fs/bcachefs/move.c +++ b/fs/bcachefs/move.c @@ -485,6 +485,8 @@ int bch2_move_data(struct bch_fs *c, struct moving_context ctxt = { .stats = stats }; struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts); BKEY_PADDED(k) tmp; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; struct data_opts data_opts; enum data_cmd data_cmd; @@ -495,9 +497,14 @@ int bch2_move_data(struct bch_fs *c, INIT_LIST_HEAD(&ctxt.reads); init_waitqueue_head(&ctxt.wait); + bch2_trans_init(&trans, c); + stats->data_type = BCH_DATA_USER; - bch2_btree_iter_init(&stats->iter, c, BTREE_ID_EXTENTS, start, - BTREE_ITER_PREFETCH); + stats->btree_id = BTREE_ID_EXTENTS; + stats->pos = POS_MIN; + + iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, start, + BTREE_ITER_PREFETCH); if (rate) bch2_ratelimit_reset(rate); @@ -507,7 +514,7 @@ int bch2_move_data(struct bch_fs *c, delay = rate ? bch2_ratelimit_delay(rate) : 0; if (delay) { - bch2_btree_iter_unlock(&stats->iter); + bch2_trans_unlock(&trans); set_current_state(TASK_INTERRUPTIBLE); } @@ -520,13 +527,16 @@ int bch2_move_data(struct bch_fs *c, schedule_timeout(delay); if (unlikely(freezing(current))) { - bch2_btree_iter_unlock(&stats->iter); + bch2_trans_unlock(&trans); move_ctxt_wait_event(&ctxt, list_empty(&ctxt.reads)); try_to_freeze(); } } while (delay); peek: - k = bch2_btree_iter_peek(&stats->iter); + k = bch2_btree_iter_peek(iter); + + stats->pos = iter->pos; + if (!k.k) break; ret = btree_iter_err(k); @@ -542,7 +552,7 @@ peek: struct bch_inode_unpacked inode; /* don't hold btree locks while looking up inode: */ - bch2_btree_iter_unlock(&stats->iter); + bch2_trans_unlock(&trans); io_opts = bch2_opts_to_inode_opts(c->opts); if (!bch2_inode_find_by_inum(c, k.k->p.inode, &inode)) @@ -567,7 +577,7 @@ peek: /* unlock before doing IO: */ bkey_reassemble(&tmp.k, k); k = bkey_i_to_s_c(&tmp.k); - bch2_btree_iter_unlock(&stats->iter); + bch2_trans_unlock(&trans); ret2 = bch2_move_extent(c, &ctxt, wp, io_opts, bkey_s_c_to_extent(k), @@ -589,11 +599,11 @@ next: atomic64_add(k.k->size * bch2_bkey_nr_dirty_ptrs(k), &stats->sectors_seen); next_nondata: - bch2_btree_iter_next(&stats->iter); - bch2_btree_iter_cond_resched(&stats->iter); + bch2_btree_iter_next(iter); + bch2_trans_cond_resched(&trans); } out: - bch2_btree_iter_unlock(&stats->iter); + bch2_trans_exit(&trans); move_ctxt_wait_event(&ctxt, list_empty(&ctxt.reads)); closure_sync(&ctxt.cl); @@ -609,20 +619,23 @@ out: static int bch2_gc_data_replicas(struct bch_fs *c) { - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; int ret; + bch2_trans_init(&trans, c); + mutex_lock(&c->replicas_gc_lock); bch2_replicas_gc_start(c, (1 << 
BCH_DATA_USER)|(1 << BCH_DATA_CACHED)); - for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, + for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN, BTREE_ITER_PREFETCH, k) { ret = bch2_mark_bkey_replicas(c, k); if (ret) break; } - ret = bch2_btree_iter_unlock(&iter) ?: ret; + ret = bch2_trans_exit(&trans) ?: ret; bch2_replicas_gc_end(c, ret); mutex_unlock(&c->replicas_gc_lock); @@ -632,24 +645,30 @@ static int bch2_gc_data_replicas(struct bch_fs *c) static int bch2_gc_btree_replicas(struct bch_fs *c) { - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct btree *b; unsigned id; int ret = 0; + bch2_trans_init(&trans, c); + mutex_lock(&c->replicas_gc_lock); bch2_replicas_gc_start(c, 1 << BCH_DATA_BTREE); for (id = 0; id < BTREE_ID_NR; id++) { - for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) { + for_each_btree_node(&trans, iter, id, POS_MIN, + BTREE_ITER_PREFETCH, b) { ret = bch2_mark_bkey_replicas(c, bkey_i_to_s_c(&b->key)); - bch2_btree_iter_cond_resched(&iter); + bch2_trans_cond_resched(&trans); } - ret = bch2_btree_iter_unlock(&iter) ?: ret; + ret = bch2_trans_iter_free(&trans, iter) ?: ret; } + bch2_trans_exit(&trans); + bch2_replicas_gc_end(c, ret); mutex_unlock(&c->replicas_gc_lock); @@ -662,16 +681,25 @@ static int bch2_move_btree(struct bch_fs *c, struct bch_move_stats *stats) { struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts); + struct btree_trans trans; + struct btree_iter *iter; struct btree *b; unsigned id; struct data_opts data_opts; enum data_cmd cmd; int ret = 0; + bch2_trans_init(&trans, c); + stats->data_type = BCH_DATA_BTREE; for (id = 0; id < BTREE_ID_NR; id++) { - for_each_btree_node(&stats->iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) { + stats->btree_id = id; + + for_each_btree_node(&trans, iter, id, POS_MIN, + BTREE_ITER_PREFETCH, b) { + stats->pos = iter->pos; + switch ((cmd = pred(c, arg, bkey_i_to_s_c(&b->key), &io_opts, &data_opts))) { @@ -686,15 +714,17 @@ static int bch2_move_btree(struct bch_fs *c, BUG(); } - ret = bch2_btree_node_rewrite(c, &stats->iter, + ret = bch2_btree_node_rewrite(c, iter, b->data->keys.seq, 0) ?: ret; next: - bch2_btree_iter_cond_resched(&stats->iter); + bch2_trans_cond_resched(&trans); } - ret = bch2_btree_iter_unlock(&stats->iter) ?: ret; + ret = bch2_trans_iter_free(&trans, iter) ?: ret; } + bch2_trans_exit(&trans); + return ret; } diff --git a/fs/bcachefs/move_types.h b/fs/bcachefs/move_types.h index 8dbeb6ef727c..6788170d3f95 100644 --- a/fs/bcachefs/move_types.h +++ b/fs/bcachefs/move_types.h @@ -4,7 +4,8 @@ struct bch_move_stats { enum bch_data_type data_type; - struct btree_iter iter; + enum btree_id btree_id; + struct bpos pos; atomic64_t keys_moved; atomic64_t sectors_moved; diff --git a/fs/bcachefs/quota.c b/fs/bcachefs/quota.c index 492ab73c39e7..f5dd13e92200 100644 --- a/fs/bcachefs/quota.c +++ b/fs/bcachefs/quota.c @@ -356,11 +356,14 @@ static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k) static int bch2_quota_init_type(struct bch_fs *c, enum quota_types type) { - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bkey_s_c k; int ret = 0; - for_each_btree_key(&iter, c, BTREE_ID_QUOTAS, POS(type, 0), + bch2_trans_init(&trans, c); + + for_each_btree_key(&trans, iter, BTREE_ID_QUOTAS, POS(type, 0), BTREE_ITER_PREFETCH, k) { if (k.k->p.inode != type) break; @@ -370,7 +373,7 @@ static int bch2_quota_init_type(struct bch_fs *c, enum quota_types type) break; } - return bch2_btree_iter_unlock(&iter) ?: ret; 
+ return bch2_trans_exit(&trans) ?: ret; } void bch2_fs_quota_exit(struct bch_fs *c) @@ -414,7 +417,8 @@ int bch2_fs_quota_read(struct bch_fs *c) { unsigned i, qtypes = enabled_qtypes(c); struct bch_memquota_type *q; - struct btree_iter iter; + struct btree_trans trans; + struct btree_iter *iter; struct bch_inode_unpacked u; struct bkey_s_c k; int ret; @@ -429,7 +433,9 @@ int bch2_fs_quota_read(struct bch_fs *c) return ret; } - for_each_btree_key(&iter, c, BTREE_ID_INODES, POS_MIN, + bch2_trans_init(&trans, c); + + for_each_btree_key(&trans, iter, BTREE_ID_INODES, POS_MIN, BTREE_ITER_PREFETCH, k) { switch (k.k->type) { case KEY_TYPE_inode: @@ -443,7 +449,7 @@ int bch2_fs_quota_read(struct bch_fs *c) KEY_TYPE_QUOTA_NOCHECK); } } - return bch2_btree_iter_unlock(&iter) ?: ret; + return bch2_trans_exit(&trans) ?: ret; } /* Enable/disable/delete quotas for an entire filesystem: */ diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c index cc1a7deb90bc..fe4a9af92a76 100644 --- a/fs/bcachefs/rebalance.c +++ b/fs/bcachefs/rebalance.c @@ -289,8 +289,8 @@ ssize_t bch2_rebalance_work_show(struct bch_fs *c, char *buf) case REBALANCE_RUNNING: pr_buf(&out, "running\n"); pr_buf(&out, "pos %llu:%llu\n", - r->move_stats.iter.pos.inode, - r->move_stats.iter.pos.offset); + r->move_stats.pos.inode, + r->move_stats.pos.offset); break; } diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h index ffa7af0820ea..0ed28d7f074d 100644 --- a/fs/bcachefs/str_hash.h +++ b/fs/bcachefs/str_hash.h @@ -203,13 +203,16 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans, for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) { if (k.k->type != desc.key_type && k.k->type != KEY_TYPE_whiteout) - return false; + break; if (k.k->type == desc.key_type && - desc.hash_bkey(info, k) <= start->pos.offset) - return true; + desc.hash_bkey(info, k) <= start->pos.offset) { + bch2_trans_iter_free_on_commit(trans, iter); + return 1; + } } - return btree_iter_err(k); + + return bch2_trans_iter_free(trans, iter); } static __always_inline @@ -220,6 +223,8 @@ int bch2_hash_set(struct btree_trans *trans, { struct btree_iter *iter, *slot = NULL; struct bkey_s_c k; + bool found = false; + int ret = 0; iter = bch2_trans_get_iter(trans, desc.btree_id, POS(inode, desc.hash_bkey(info, bkey_i_to_s_c(insert))), @@ -250,21 +255,30 @@ int bch2_hash_set(struct btree_trans *trans, goto not_found; } - return btree_iter_err(k) ?: -ENOSPC; -not_found: - if (flags & BCH_HASH_SET_MUST_REPLACE) - return -ENOENT; + if (slot) + bch2_trans_iter_free(trans, iter); - insert->k.p = slot->pos; - bch2_trans_update(trans, BTREE_INSERT_ENTRY(slot, insert)); - return 0; + return bch2_trans_iter_free(trans, iter) ?: -ENOSPC; found: - if (flags & BCH_HASH_SET_MUST_CREATE) - return -EEXIST; + found = true; +not_found: - insert->k.p = iter->pos; - bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, insert)); - return 0; + if (!found && (flags & BCH_HASH_SET_MUST_REPLACE)) { + ret = -ENOENT; + } else if (found && (flags & BCH_HASH_SET_MUST_CREATE)) { + ret = -EEXIST; + } else { + if (!found && slot) { + bch2_trans_iter_free(trans, iter); + iter = slot; + } + + insert->k.p = iter->pos; + bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, insert)); + bch2_trans_iter_free_on_commit(trans, iter); + } + + return ret; } static __always_inline diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c index f1e269671374..1354dd33874c 100644 --- a/fs/bcachefs/sysfs.c +++ b/fs/bcachefs/sysfs.c @@ -282,7 +282,8 @@ static ssize_t show_fs_alloc_debug(struct bch_fs *c, char 
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index f1e269671374..1354dd33874c 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -282,7 +282,8 @@ static ssize_t show_fs_alloc_debug(struct bch_fs *c, char *buf)
 
 static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
 	struct bkey_s_c k;
 	u64 nr_uncompressed_extents = 0, uncompressed_sectors = 0,
 	    nr_compressed_extents = 0,
@@ -292,7 +293,9 @@ static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
 	if (!test_bit(BCH_FS_STARTED, &c->flags))
 		return -EPERM;
 
-	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0, k)
+	bch2_trans_init(&trans, c);
+
+	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN, 0, k)
 		if (k.k->type == KEY_TYPE_extent) {
 			struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
 			const union bch_extent_entry *entry;
@@ -314,7 +317,7 @@ static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
 				break;
 			}
 		}
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_exit(&trans);
 
 	return scnprintf(buf, PAGE_SIZE,
 			"uncompressed data:\n"
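The same mechanical conversion repeats in bch2_compression_stats() above and in the quota, xattr and test code elsewhere in this patch: a stack btree_iter plus bch2_btree_iter_unlock() becomes a btree_trans that owns a heap-allocated iterator and is torn down once with bch2_trans_exit(). Below is a minimal before/after sketch of that pattern, not taken from any one hunk; the function names count_dirents_old()/count_dirents_new() are hypothetical and error handling is omitted, but only calls that appear in the hunks are used.

/* Before: the iterator lives on the caller's stack and is torn down directly. */
static u64 count_dirents_old(struct bch_fs *c)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 n = 0;

	for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN, 0, k)
		n++;
	bch2_btree_iter_unlock(&iter);

	return n;
}

/* After: iterators are owned by a btree_trans and released by bch2_trans_exit(). */
static u64 count_dirents_new(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	u64 n = 0;

	bch2_trans_init(&trans, c);

	for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN, 0, k)
		n++;

	bch2_trans_exit(&trans);

	return n;
}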
diff --git a/fs/bcachefs/tests.c b/fs/bcachefs/tests.c
index 652e22125dcf..c8682fe674f6 100644
--- a/fs/bcachefs/tests.c
+++ b/fs/bcachefs/tests.c
@@ -89,11 +89,14 @@ static void test_delete_written(struct bch_fs *c, u64 nr)
 
 static void test_iterate(struct bch_fs *c, u64 nr)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
 	struct bkey_s_c k;
 	u64 i;
 	int ret;
 
+	bch2_trans_init(&trans, c);
+
 	delete_test_keys(c);
 
 	pr_info("inserting test keys");
@@ -113,28 +116,31 @@ static void test_iterate(struct bch_fs *c, u64 nr)
 
 	i = 0;
 
-	for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0), 0, k)
+	for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS(0, 0), 0, k)
 		BUG_ON(k.k->p.offset != i++);
-	bch2_btree_iter_unlock(&iter);
 
 	BUG_ON(i != nr);
 
 	pr_info("iterating backwards");
 
-	while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k))
+	while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(iter)).k))
 		BUG_ON(k.k->p.offset != --i);
-	bch2_btree_iter_unlock(&iter);
 
 	BUG_ON(i);
+
+	bch2_trans_exit(&trans);
 }
 
 static void test_iterate_extents(struct bch_fs *c, u64 nr)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
 	struct bkey_s_c k;
 	u64 i;
 	int ret;
 
+	bch2_trans_init(&trans, c);
+
 	delete_test_keys(c);
 
 	pr_info("inserting test extents");
@@ -155,32 +161,35 @@ static void test_iterate_extents(struct bch_fs *c, u64 nr)
 
 	i = 0;
 
-	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
+	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
 		BUG_ON(bkey_start_offset(k.k) != i);
 		i = k.k->p.offset;
 	}
-	bch2_btree_iter_unlock(&iter);
 
 	BUG_ON(i != nr);
 
 	pr_info("iterating backwards");
 
-	while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(&iter)).k)) {
+	while (!IS_ERR_OR_NULL((k = bch2_btree_iter_prev(iter)).k)) {
 		BUG_ON(k.k->p.offset != i);
 		i = bkey_start_offset(k.k);
 	}
-	bch2_btree_iter_unlock(&iter);
 
 	BUG_ON(i);
+
+	bch2_trans_exit(&trans);
 }
 
 static void test_iterate_slots(struct bch_fs *c, u64 nr)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
 	struct bkey_s_c k;
 	u64 i;
 	int ret;
 
+	bch2_trans_init(&trans, c);
+
 	delete_test_keys(c);
 
 	pr_info("inserting test keys");
@@ -200,11 +209,11 @@ static void test_iterate_slots(struct bch_fs *c, u64 nr)
 
 	i = 0;
 
-	for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0), 0, k) {
+	for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS(0, 0), 0, k) {
 		BUG_ON(k.k->p.offset != i);
 		i += 2;
 	}
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_iter_free(&trans, iter);
 
 	BUG_ON(i != nr * 2);
 
@@ -212,7 +221,7 @@ static void test_iterate_slots(struct bch_fs *c, u64 nr)
 
 	i = 0;
 
-	for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS(0, 0),
+	for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS(0, 0),
 			   BTREE_ITER_SLOTS, k) {
 		BUG_ON(bkey_deleted(k.k) != (i & 1));
 		BUG_ON(k.k->p.offset != i++);
@@ -220,16 +229,20 @@ static void test_iterate_slots(struct bch_fs *c, u64 nr)
 		if (i == nr * 2)
 			break;
 	}
-	bch2_btree_iter_unlock(&iter);
+
+	bch2_trans_exit(&trans);
 }
 
 static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
 	struct bkey_s_c k;
 	u64 i;
 	int ret;
 
+	bch2_trans_init(&trans, c);
+
 	delete_test_keys(c);
 
 	pr_info("inserting test keys");
@@ -250,12 +263,12 @@ static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
 
 	i = 0;
 
-	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
+	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
 		BUG_ON(bkey_start_offset(k.k) != i + 8);
 		BUG_ON(k.k->size != 8);
 		i += 16;
 	}
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_iter_free(&trans, iter);
 
 	BUG_ON(i != nr);
 
@@ -263,7 +276,7 @@ static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
 
 	i = 0;
 
-	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(0, 0),
+	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS(0, 0),
 			   BTREE_ITER_SLOTS, k) {
 		BUG_ON(bkey_deleted(k.k) != !(i % 16));
 
@@ -274,7 +287,8 @@ static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
 		if (i == nr)
 			break;
 	}
-	bch2_btree_iter_unlock(&iter);
+
+	bch2_trans_exit(&trans);
 }
 
 /*
@@ -283,34 +297,40 @@ static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
  */
 static void test_peek_end(struct bch_fs *c, u64 nr)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
 	struct bkey_s_c k;
 
-	bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS, POS_MIN, 0);
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN, 0);
 
-	k = bch2_btree_iter_peek(&iter);
+	k = bch2_btree_iter_peek(iter);
 	BUG_ON(k.k);
 
-	k = bch2_btree_iter_peek(&iter);
+	k = bch2_btree_iter_peek(iter);
 	BUG_ON(k.k);
 
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_exit(&trans);
 }
 
 static void test_peek_end_extents(struct bch_fs *c, u64 nr)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
 	struct bkey_s_c k;
 
-	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0);
+	bch2_trans_init(&trans, c);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN, 0);
 
-	k = bch2_btree_iter_peek(&iter);
+	k = bch2_btree_iter_peek(iter);
 	BUG_ON(k.k);
 
-	k = bch2_btree_iter_peek(&iter);
+	k = bch2_btree_iter_peek(iter);
 	BUG_ON(k.k);
 
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_exit(&trans);
 }
 
 /* extent unit tests */
@@ -401,32 +421,35 @@ static void rand_insert(struct bch_fs *c, u64 nr)
 
 static void rand_lookup(struct bch_fs *c, u64 nr)
 {
+	struct btree_trans trans;
+	struct btree_iter *iter;
+	struct bkey_s_c k;
 	u64 i;
 
-	for (i = 0; i < nr; i++) {
-		struct btree_iter iter;
-		struct bkey_s_c k;
+	bch2_trans_init(&trans, c);
 
-		bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS,
-				     POS(0, test_rand()), 0);
+	for (i = 0; i < nr; i++) {
+		iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
					   POS(0, test_rand()), 0);
 
-		k = bch2_btree_iter_peek(&iter);
-		bch2_btree_iter_unlock(&iter);
+		k = bch2_btree_iter_peek(iter);
+		bch2_trans_iter_free(&trans, iter);
 	}
+
+	bch2_trans_exit(&trans);
 }
 
 static void rand_mixed(struct bch_fs *c, u64 nr)
 {
+	struct btree_trans trans;
+	struct btree_iter *iter;
+	struct bkey_s_c k;
 	int ret;
 	u64 i;
 
-	for (i = 0; i < nr; i++) {
-		struct btree_trans trans;
-		struct btree_iter *iter;
-		struct bkey_s_c k;
-
-		bch2_trans_init(&trans, c);
+	bch2_trans_init(&trans, c);
 
+	for (i = 0; i < nr; i++) {
 		iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
 					   POS(0, test_rand()), 0);
 
@@ -443,9 +466,10 @@ static void rand_mixed(struct bch_fs *c, u64 nr)
 			BUG_ON(ret);
 		}
 
-		bch2_trans_exit(&trans);
+		bch2_trans_iter_free(&trans, iter);
 	}
 
+	bch2_trans_exit(&trans);
 }
 
 static void rand_delete(struct bch_fs *c, u64 nr)
@@ -495,12 +519,15 @@ static void seq_insert(struct bch_fs *c, u64 nr)
 
 static void seq_lookup(struct bch_fs *c, u64 nr)
 {
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
 	struct bkey_s_c k;
 
-	for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN, 0, k)
+	bch2_trans_init(&trans, c);
+
+	for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN, 0, k)
 		;
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_exit(&trans);
 }
 
 static void seq_overwrite(struct bch_fs *c, u64 nr)
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
index 545e743972fb..68ece7c0ee7a 100644
--- a/fs/bcachefs/xattr.c
+++ b/fs/bcachefs/xattr.c
@@ -271,12 +271,16 @@ ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 {
 	struct bch_fs *c = dentry->d_sb->s_fs_info;
 	struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
-	struct btree_iter iter;
+	struct btree_trans trans;
+	struct btree_iter *iter;
 	struct bkey_s_c k;
 	u64 inum = dentry->d_inode->i_ino;
 	ssize_t ret = 0;
 
-	for_each_btree_key(&iter, c, BTREE_ID_XATTRS, POS(inum, 0), 0, k) {
+	bch2_trans_init(&trans, c);
+
+	for_each_btree_key(&trans, iter, BTREE_ID_XATTRS,
+			   POS(inum, 0), 0, k) {
 		BUG_ON(k.k->p.inode < inum);
 
 		if (k.k->p.inode > inum)
@@ -290,7 +294,7 @@ ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 		if (ret < 0)
 			break;
 	}
-	bch2_btree_iter_unlock(&iter);
+	bch2_trans_exit(&trans);
 
 	if (ret < 0)
 		return ret;
-- 
cgit v1.2.3
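For point lookups, as in rand_lookup() and rand_mixed() above, the patch hoists one transaction outside the loop and has each lookup borrow a short-lived iterator that is returned immediately with bch2_trans_iter_free(). The sketch below is illustrative only and modelled on rand_lookup(); example_point_lookups() is a hypothetical name, the lookup result is simply discarded as in the test code, and error handling is omitted.

/* Sketch, not from this commit: one transaction for the whole loop,
 * one short-lived iterator per key looked up. */
static void example_point_lookups(struct bch_fs *c, u64 nr)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	u64 i;

	bch2_trans_init(&trans, c);

	for (i = 0; i < nr; i++) {
		/* each lookup borrows an iterator from the transaction... */
		iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
					   POS(0, i), 0);

		k = bch2_btree_iter_peek(iter);

		/* ...and hands it straight back once the key has been read */
		bch2_trans_iter_free(&trans, iter);
	}

	bch2_trans_exit(&trans);
}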